Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig | 15
-rw-r--r--  arch/alpha/Kconfig | 4
-rw-r--r--  arch/alpha/kernel/pci.c | 7
-rw-r--r--  arch/alpha/kernel/rtc.c | 8
-rw-r--r--  arch/alpha/kernel/sys_nautilus.c | 4
-rw-r--r--  arch/arc/kernel/process.c | 2
-rw-r--r--  arch/arc/kernel/signal.c | 24
-rw-r--r--  arch/arc/kernel/unwind.c | 2
-rw-r--r--  arch/arm/Kconfig | 8
-rw-r--r--  arch/arm/boot/dts/am4372.dtsi | 11
-rw-r--r--  arch/arm/boot/dts/am437x-gp-evm.dts | 1
-rw-r--r--  arch/arm/boot/dts/am437x-sk-evm.dts | 1
-rw-r--r--  arch/arm/boot/dts/am43x-epos-evm.dts | 1
-rw-r--r--  arch/arm/boot/dts/am57xx-beagle-x15.dts | 3
-rw-r--r--  arch/arm/boot/dts/dm8168-evm.dts | 19
-rw-r--r--  arch/arm/boot/dts/dm816x.dtsi | 18
-rw-r--r--  arch/arm/boot/dts/dra7-evm.dts | 2
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 45
-rw-r--r--  arch/arm/boot/dts/dra72-evm.dts | 1
-rw-r--r--  arch/arm/boot/dts/dra72x.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/dra74x.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/exynos3250.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/exynos4.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/exynos5250.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/exynos5420.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/omap3.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/omap4-duovero.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap4-panda-common.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/omap4-sdp.dts | 8
-rw-r--r--  arch/arm/boot/dts/omap4-var-som-om44.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap4.dtsi | 18
-rw-r--r--  arch/arm/boot/dts/omap5-cm-t54.dts | 1
-rw-r--r--  arch/arm/boot/dts/omap5-uevm.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap5.dtsi | 26
-rw-r--r--  arch/arm/boot/dts/rk3288.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/socfpga.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/stih416.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts | 16
-rw-r--r--  arch/arm/boot/dts/sun4i-a10.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/sun5i-a13.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/sun7i-a20.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/tegra114.dtsi | 16
-rw-r--r--  arch/arm/boot/dts/tegra124.dtsi | 16
-rw-r--r--  arch/arm/boot/dts/tegra20.dtsi | 15
-rw-r--r--  arch/arm/boot/dts/tegra30.dtsi | 16
-rw-r--r--  arch/arm/common/bL_switcher.c | 16
-rw-r--r--  arch/arm/crypto/aesbs-core.S_shipped | 12
-rw-r--r--  arch/arm/crypto/bsaes-armv7.pl | 12
-rw-r--r--  arch/arm/include/asm/cpuidle.h | 23
-rw-r--r--  arch/arm/include/asm/elf.h | 4
-rw-r--r--  arch/arm/include/asm/jump_label.h | 5
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 1
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 15
-rw-r--r--  arch/arm/include/asm/kvm_mmio.h | 22
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 13
-rw-r--r--  arch/arm/include/asm/mach/time.h | 3
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h | 3
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 4
-rw-r--r--  arch/arm/kernel/cpuidle.c | 133
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/arm/kernel/time.c | 6
-rw-r--r--  arch/arm/kernel/vdso.c | 12
-rw-r--r--  arch/arm/kvm/Kconfig | 30
-rw-r--r--  arch/arm/kvm/Makefile | 12
-rw-r--r--  arch/arm/kvm/arm.c | 45
-rw-r--r--  arch/arm/kvm/guest.c | 18
-rw-r--r--  arch/arm/kvm/interrupts_head.S | 8
-rw-r--r--  arch/arm/kvm/mmio.c | 64
-rw-r--r--  arch/arm/kvm/mmu.c | 209
-rw-r--r--  arch/arm/kvm/trace.h | 48
-rw-r--r--  arch/arm/mach-davinci/cpuidle.c | 1
-rw-r--r--  arch/arm/mach-dove/pcie.c | 12
-rw-r--r--  arch/arm/mach-exynos/exynos.c | 15
-rw-r--r--  arch/arm/mach-exynos/suspend.c | 135
-rw-r--r--  arch/arm/mach-imx/Kconfig | 1
-rw-r--r--  arch/arm/mach-imx/cpuidle-imx6q.c | 1
-rw-r--r--  arch/arm/mach-imx/cpuidle-imx6sl.c | 1
-rw-r--r--  arch/arm/mach-imx/cpuidle-imx6sx.c | 1
-rw-r--r--  arch/arm/mach-mv78xx0/pcie.c | 12
-rw-r--r--  arch/arm/mach-omap2/cpuidle44xx.c | 11
-rw-r--r--  arch/arm/mach-omap2/hsmmc.c | 33
-rw-r--r--  arch/arm/mach-omap2/id.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap-wakeupgen.c | 128
-rw-r--r--  arch/arm/mach-omap2/omap-wakeupgen.h | 1
-rw-r--r--  arch/arm/mach-omap2/omap4-common.c | 27
-rw-r--r--  arch/arm/mach-orion5x/pci.c | 32
-rw-r--r--  arch/arm/mach-pxa/irq.c | 111
-rw-r--r--  arch/arm/mach-pxa/raumfeld.c | 4
-rw-r--r--  arch/arm/mach-pxa/zeus.c | 2
-rw-r--r--  arch/arm/mach-s3c64xx/cpuidle.c | 2
-rw-r--r--  arch/arm/mach-shmobile/intc-sh73a0.c | 7
-rw-r--r--  arch/arm/mach-shmobile/setup-r8a7779.c | 7
-rw-r--r--  arch/arm/mach-sunxi/Kconfig | 8
-rw-r--r--  arch/arm/mach-tegra/cpuidle-tegra114.c | 6
-rw-r--r--  arch/arm/mach-tegra/cpuidle-tegra20.c | 11
-rw-r--r--  arch/arm/mach-tegra/cpuidle-tegra30.c | 11
-rw-r--r--  arch/arm/mach-tegra/iomap.h | 15
-rw-r--r--  arch/arm/mach-tegra/irq.c | 209
-rw-r--r--  arch/arm/mach-tegra/irq.h | 6
-rw-r--r--  arch/arm/mach-tegra/tegra.c | 1
-rw-r--r--  arch/arm/mach-ux500/cpu.c | 2
-rw-r--r--  arch/arm/mach-zynq/common.c | 2
-rw-r--r--  arch/arm/mm/dma-mapping.c | 7
-rw-r--r--  arch/arm/mm/init.c | 3
-rw-r--r--  arch/arm/mm/mmap.c | 16
-rw-r--r--  arch/arm/plat-omap/counter_32k.c | 20
-rw-r--r--  arch/arm/plat-omap/dmtimer.c | 15
-rw-r--r--  arch/arm64/Kconfig | 16
-rw-r--r--  arch/arm64/boot/dts/arm/juno-clocks.dtsi | 2
-rw-r--r--  arch/arm64/configs/defconfig | 2
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 32
-rw-r--r--  arch/arm64/include/asm/cpuidle.h | 9
-rw-r--r--  arch/arm64/include/asm/elf.h | 5
-rw-r--r--  arch/arm64/include/asm/esr.h | 1
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 8
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 6
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 15
-rw-r--r--  arch/arm64/include/asm/kvm_mmio.h | 22
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 52
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 9
-rw-r--r--  arch/arm64/include/asm/page.h | 4
-rw-r--r--  arch/arm64/include/asm/percpu.h | 44
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 8
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 6
-rw-r--r--  arch/arm64/include/asm/pgtable-types.h | 12
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 8
-rw-r--r--  arch/arm64/include/asm/proc-fns.h | 6
-rw-r--r--  arch/arm64/include/asm/tlb.h | 4
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 3
-rw-r--r--  arch/arm64/kernel/cpuidle.c | 2
-rw-r--r--  arch/arm64/kernel/efi.c | 6
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/arm64/kernel/vdso.c | 10
-rw-r--r--  arch/arm64/kvm/Kconfig | 18
-rw-r--r--  arch/arm64/kvm/Makefile | 20
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 12
-rw-r--r--  arch/arm64/mm/init.c | 2
-rw-r--r--  arch/arm64/mm/mmap.c | 20
-rw-r--r--  arch/arm64/mm/mmu.c | 4
-rw-r--r--  arch/avr32/include/asm/elf.h | 2
-rw-r--r--  arch/blackfin/mach-common/smp.c | 6
-rw-r--r--  arch/c6x/kernel/process.c | 1
-rw-r--r--  arch/frv/kernel/signal.c | 4
-rw-r--r--  arch/frv/mb93090-mb00/pci-vdk.c | 6
-rw-r--r--  arch/hexagon/kernel/process.c | 2
-rw-r--r--  arch/ia64/Kconfig | 18
-rw-r--r--  arch/ia64/include/asm/page.h | 4
-rw-r--r--  arch/ia64/include/asm/pgalloc.h | 4
-rw-r--r--  arch/ia64/include/asm/pgtable.h | 12
-rw-r--r--  arch/ia64/kernel/ivt.S | 12
-rw-r--r--  arch/ia64/kernel/machine_kexec.c | 4
-rw-r--r--  arch/ia64/sn/kernel/io_init.c | 2
-rw-r--r--  arch/m32r/kernel/signal.c | 2
-rw-r--r--  arch/m68k/Kconfig | 4
-rw-r--r--  arch/m68k/coldfire/pci.c | 4
-rw-r--r--  arch/m68k/configs/amiga_defconfig | 3
-rw-r--r--  arch/m68k/configs/apollo_defconfig | 3
-rw-r--r--  arch/m68k/configs/atari_defconfig | 3
-rw-r--r--  arch/m68k/configs/bvme6000_defconfig | 3
-rw-r--r--  arch/m68k/configs/hp300_defconfig | 3
-rw-r--r--  arch/m68k/configs/mac_defconfig | 3
-rw-r--r--  arch/m68k/configs/multi_defconfig | 3
-rw-r--r--  arch/m68k/configs/mvme147_defconfig | 3
-rw-r--r--  arch/m68k/configs/mvme16x_defconfig | 3
-rw-r--r--  arch/m68k/configs/q40_defconfig | 5
-rw-r--r--  arch/m68k/configs/sun3_defconfig | 3
-rw-r--r--  arch/m68k/configs/sun3x_defconfig | 3
-rw-r--r--  arch/m68k/include/asm/mcfqspi.h | 5
-rw-r--r--  arch/m68k/kernel/pcibios.c | 2
-rw-r--r--  arch/m68k/lib/ashldi3.c | 7
-rw-r--r--  arch/m68k/lib/ashrdi3.c | 7
-rw-r--r--  arch/m68k/lib/divsi3.S | 7
-rw-r--r--  arch/m68k/lib/lshrdi3.c | 7
-rw-r--r--  arch/m68k/lib/modsi3.S | 7
-rw-r--r--  arch/m68k/lib/muldi3.c | 7
-rw-r--r--  arch/m68k/lib/mulsi3.S | 7
-rw-r--r--  arch/m68k/lib/udivsi3.S | 7
-rw-r--r--  arch/m68k/lib/umodsi3.S | 7
-rw-r--r--  arch/m68k/mac/oss.c | 3
-rw-r--r--  arch/metag/include/asm/io.h | 1
-rw-r--r--  arch/metag/include/asm/pgtable-bits.h | 104
-rw-r--r--  arch/metag/include/asm/pgtable.h | 95
-rw-r--r--  arch/metag/include/asm/processor.h | 1
-rw-r--r--  arch/metag/kernel/smp.c | 5
-rw-r--r--  arch/microblaze/kernel/signal.c | 2
-rw-r--r--  arch/microblaze/pci/pci-common.c | 4
-rw-r--r--  arch/mips/Kconfig | 7
-rw-r--r--  arch/mips/include/asm/asmmacro-32.h | 128
-rw-r--r--  arch/mips/include/asm/asmmacro.h | 218
-rw-r--r--  arch/mips/include/asm/elf.h | 4
-rw-r--r--  arch/mips/include/asm/fpu.h | 20
-rw-r--r--  arch/mips/include/asm/jump_label.h | 7
-rw-r--r--  arch/mips/include/asm/kdebug.h | 3
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 125
-rw-r--r--  arch/mips/include/asm/processor.h | 2
-rw-r--r--  arch/mips/include/uapi/asm/kvm.h | 164
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 105
-rw-r--r--  arch/mips/kernel/genex.S | 15
-rw-r--r--  arch/mips/kernel/ptrace.c | 30
-rw-r--r--  arch/mips/kernel/r4k_fpu.S | 2
-rw-r--r--  arch/mips/kernel/traps.c | 33
-rw-r--r--  arch/mips/kvm/Makefile | 8
-rw-r--r--  arch/mips/kvm/emulate.c | 332
-rw-r--r--  arch/mips/kvm/fpu.S | 122
-rw-r--r--  arch/mips/kvm/locore.S | 38
-rw-r--r--  arch/mips/kvm/mips.c | 472
-rw-r--r--  arch/mips/kvm/msa.S | 161
-rw-r--r--  arch/mips/kvm/stats.c | 4
-rw-r--r--  arch/mips/kvm/tlb.c | 6
-rw-r--r--  arch/mips/kvm/trap_emul.c | 199
-rw-r--r--  arch/mips/lasat/sysctl.c | 4
-rw-r--r--  arch/mips/loongson/loongson-3/hpet.c | 2
-rw-r--r--  arch/mips/mm/mmap.c | 24
-rw-r--r--  arch/mips/pci/pci.c | 32
-rw-r--r--  arch/mn10300/unit-asb2305/pci.c | 6
-rw-r--r--  arch/nios2/include/asm/thread_info.h | 4
-rw-r--r--  arch/nios2/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/nios2/include/uapi/asm/ptrace.h | 9
-rw-r--r--  arch/nios2/kernel/entry.S | 2
-rw-r--r--  arch/nios2/kernel/process.c | 1
-rw-r--r--  arch/nios2/kernel/signal.c | 2
-rw-r--r--  arch/nios2/mm/cacheflush.c | 3
-rw-r--r--  arch/nios2/mm/fault.c | 6
-rw-r--r--  arch/openrisc/kernel/process.c | 1
-rw-r--r--  arch/parisc/Kconfig | 5
-rw-r--r--  arch/parisc/include/asm/pgalloc.h | 19
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 16
-rw-r--r--  arch/parisc/kernel/entry.S | 4
-rw-r--r--  arch/parisc/kernel/head.S | 4
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 9
-rw-r--r--  arch/parisc/mm/init.c | 2
-rw-r--r--  arch/powerpc/Kconfig | 8
-rw-r--r--  arch/powerpc/include/asm/cputhreads.h | 2
-rw-r--r--  arch/powerpc/include/asm/elf.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 4
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 3
-rw-r--r--  arch/powerpc/include/asm/reg.h | 3
-rw-r--r--  arch/powerpc/include/asm/smu.h | 2
-rw-r--r--  arch/powerpc/kernel/cputable.c | 20
-rw-r--r--  arch/powerpc/kernel/dbell.c | 2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 2
-rw-r--r--  arch/powerpc/kernel/prom.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 8
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1
-rw-r--r--  arch/powerpc/kvm/mpic.c | 17
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 4
-rw-r--r--  arch/powerpc/mm/mmap.c | 28
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 13
-rw-r--r--  arch/powerpc/perf/hv-24x7.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_rdk.c | 4
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 2
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 14
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall.S | 2
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 44
-rw-r--r--  arch/s390/Kbuild | 1
-rw-r--r--  arch/s390/Kconfig | 88
-rw-r--r--  arch/s390/Makefile | 16
-rw-r--r--  arch/s390/boot/compressed/Makefile | 12
-rw-r--r--  arch/s390/boot/compressed/head.S (renamed from arch/s390/boot/compressed/head64.S) | 0
-rw-r--r--  arch/s390/boot/compressed/head31.S | 51
-rw-r--r--  arch/s390/boot/compressed/vmlinux.lds.S | 5
-rw-r--r--  arch/s390/crypto/crypt_s390.h | 8
-rw-r--r--  arch/s390/hypfs/hypfs_diag0c.c | 4
-rw-r--r--  arch/s390/hypfs/inode.c | 2
-rw-r--r--  arch/s390/include/asm/appldata.h | 24
-rw-r--r--  arch/s390/include/asm/atomic.h | 95
-rw-r--r--  arch/s390/include/asm/bitops.h | 28
-rw-r--r--  arch/s390/include/asm/cmpxchg.h | 7
-rw-r--r--  arch/s390/include/asm/cputime.h | 26
-rw-r--r--  arch/s390/include/asm/ctl_reg.h | 14
-rw-r--r--  arch/s390/include/asm/elf.h | 18
-rw-r--r--  arch/s390/include/asm/idals.h | 16
-rw-r--r--  arch/s390/include/asm/jump_label.h | 15
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 46
-rw-r--r--  arch/s390/include/asm/livepatch.h | 43
-rw-r--r--  arch/s390/include/asm/lowcore.h | 159
-rw-r--r--  arch/s390/include/asm/mman.h | 2
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 4
-rw-r--r--  arch/s390/include/asm/percpu.h | 4
-rw-r--r--  arch/s390/include/asm/perf_event.h | 3
-rw-r--r--  arch/s390/include/asm/pgalloc.h | 24
-rw-r--r--  arch/s390/include/asm/pgtable.h | 125
-rw-r--r--  arch/s390/include/asm/processor.h | 66
-rw-r--r--  arch/s390/include/asm/ptrace.h | 4
-rw-r--r--  arch/s390/include/asm/qdio.h | 10
-rw-r--r--  arch/s390/include/asm/runtime_instr.h | 10
-rw-r--r--  arch/s390/include/asm/rwsem.h | 81
-rw-r--r--  arch/s390/include/asm/setup.h | 35
-rw-r--r--  arch/s390/include/asm/sfp-util.h | 10
-rw-r--r--  arch/s390/include/asm/sparsemem.h | 9
-rw-r--r--  arch/s390/include/asm/switch_to.h | 21
-rw-r--r--  arch/s390/include/asm/syscall.h | 2
-rw-r--r--  arch/s390/include/asm/thread_info.h | 11
-rw-r--r--  arch/s390/include/asm/tlb.h | 4
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 7
-rw-r--r--  arch/s390/include/asm/types.h | 17
-rw-r--r--  arch/s390/include/asm/uaccess.h | 1
-rw-r--r--  arch/s390/include/asm/unistd.h | 8
-rw-r--r--  arch/s390/include/asm/vdso.h | 2
-rw-r--r--  arch/s390/include/uapi/asm/kvm.h | 4
-rw-r--r--  arch/s390/include/uapi/asm/sie.h | 4
-rw-r--r--  arch/s390/kernel/Makefile | 24
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 5
-rw-r--r--  arch/s390/kernel/base.S | 76
-rw-r--r--  arch/s390/kernel/cache.c | 4
-rw-r--r--  arch/s390/kernel/cpcmd.c | 10
-rw-r--r--  arch/s390/kernel/diag.c | 15
-rw-r--r--  arch/s390/kernel/dis.c | 48
-rw-r--r--  arch/s390/kernel/dumpstack.c | 26
-rw-r--r--  arch/s390/kernel/early.c | 69
-rw-r--r--  arch/s390/kernel/entry.S | 1005
-rw-r--r--  arch/s390/kernel/entry64.S | 1059
-rw-r--r--  arch/s390/kernel/ftrace.c | 73
-rw-r--r--  arch/s390/kernel/head.S | 49
-rw-r--r--  arch/s390/kernel/head31.S | 106
-rw-r--r--  arch/s390/kernel/head_kdump.S | 8
-rw-r--r--  arch/s390/kernel/ipl.c | 157
-rw-r--r--  arch/s390/kernel/irq.c | 3
-rw-r--r--  arch/s390/kernel/jump_label.c | 2
-rw-r--r--  arch/s390/kernel/kprobes.c | 2
-rw-r--r--  arch/s390/kernel/module.c | 12
-rw-r--r--  arch/s390/kernel/nmi.c | 92
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 7
-rw-r--r--  arch/s390/kernel/pgm_check.S | 22
-rw-r--r--  arch/s390/kernel/process.c | 29
-rw-r--r--  arch/s390/kernel/ptrace.c | 46
-rw-r--r--  arch/s390/kernel/reipl.S | 133
-rw-r--r--  arch/s390/kernel/reipl64.S | 155
-rw-r--r--  arch/s390/kernel/relocate_kernel.S | 63
-rw-r--r--  arch/s390/kernel/relocate_kernel64.S | 121
-rw-r--r--  arch/s390/kernel/sclp.S | 10
-rw-r--r--  arch/s390/kernel/setup.c | 72
-rw-r--r--  arch/s390/kernel/signal.c | 10
-rw-r--r--  arch/s390/kernel/smp.c | 36
-rw-r--r--  arch/s390/kernel/suspend.c | 4
-rw-r--r--  arch/s390/kernel/swsusp.S (renamed from arch/s390/kernel/swsusp_asm64.S) | 11
-rw-r--r--  arch/s390/kernel/sys_s390.c | 49
-rw-r--r--  arch/s390/kernel/syscalls.S | 716
-rw-r--r--  arch/s390/kernel/time.c | 20
-rw-r--r--  arch/s390/kernel/topology.c | 2
-rw-r--r--  arch/s390/kernel/traps.c | 155
-rw-r--r--  arch/s390/kernel/uprobes.c | 4
-rw-r--r--  arch/s390/kernel/vdso.c | 16
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 7
-rw-r--r--  arch/s390/kvm/diag.c | 6
-rw-r--r--  arch/s390/kvm/gaccess.c | 296
-rw-r--r--  arch/s390/kvm/gaccess.h | 21
-rw-r--r--  arch/s390/kvm/guestdbg.c | 8
-rw-r--r--  arch/s390/kvm/intercept.c | 5
-rw-r--r--  arch/s390/kvm/interrupt.c | 1101
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 399
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 51
-rw-r--r--  arch/s390/kvm/priv.c | 144
-rw-r--r--  arch/s390/kvm/sigp.c | 7
-rw-r--r--  arch/s390/kvm/trace-s390.h | 7
-rw-r--r--  arch/s390/lib/Makefile | 3
-rw-r--r--  arch/s390/lib/div64.c | 147
-rw-r--r--  arch/s390/lib/mem.S (renamed from arch/s390/lib/mem64.S) | 0
-rw-r--r--  arch/s390/lib/mem32.S | 92
-rw-r--r--  arch/s390/lib/qrnnd.S | 78
-rw-r--r--  arch/s390/lib/uaccess.c | 136
-rw-r--r--  arch/s390/lib/ucmpdi2.c | 26
-rw-r--r--  arch/s390/math-emu/Makefile | 7
-rw-r--r--  arch/s390/math-emu/math.c | 2255
-rw-r--r--  arch/s390/mm/dump_pagetables.c | 24
-rw-r--r--  arch/s390/mm/extmem.c | 14
-rw-r--r--  arch/s390/mm/fault.c | 36
-rw-r--r--  arch/s390/mm/gup.c | 4
-rw-r--r--  arch/s390/mm/init.c | 5
-rw-r--r--  arch/s390/mm/maccess.c | 70
-rw-r--r--  arch/s390/mm/mem_detect.c | 4
-rw-r--r--  arch/s390/mm/mmap.c | 59
-rw-r--r--  arch/s390/mm/pageattr.c | 2
-rw-r--r--  arch/s390/mm/pgtable.c | 8
-rw-r--r--  arch/s390/mm/vmem.c | 10
-rw-r--r--  arch/s390/oprofile/Makefile | 2
-rw-r--r--  arch/s390/oprofile/init.c | 11
-rw-r--r--  arch/s390/pci/pci.c | 5
-rw-r--r--  arch/sh/Kconfig | 4
-rw-r--r--  arch/sh/drivers/pci/pci.c | 25
-rw-r--r--  arch/sh/kernel/dwarf.c | 18
-rw-r--r--  arch/sh/kernel/signal_32.c | 4
-rw-r--r--  arch/sh/kernel/signal_64.c | 4
-rw-r--r--  arch/sparc/Kconfig | 7
-rw-r--r--  arch/sparc/include/asm/hypervisor.h | 12
-rw-r--r--  arch/sparc/include/asm/io_64.h | 20
-rw-r--r--  arch/sparc/include/asm/jump_label.h | 5
-rw-r--r--  arch/sparc/include/asm/starfire.h | 1
-rw-r--r--  arch/sparc/kernel/entry.h | 4
-rw-r--r--  arch/sparc/kernel/hvapi.c | 1
-rw-r--r--  arch/sparc/kernel/hvcalls.S | 16
-rw-r--r--  arch/sparc/kernel/leon_pci.c | 16
-rw-r--r--  arch/sparc/kernel/mdesc.c | 22
-rw-r--r--  arch/sparc/kernel/pci.c | 8
-rw-r--r--  arch/sparc/kernel/pcic.c | 4
-rw-r--r--  arch/sparc/kernel/pcr.c | 33
-rw-r--r--  arch/sparc/kernel/perf_event.c | 55
-rw-r--r--  arch/sparc/kernel/process_64.c | 4
-rw-r--r--  arch/sparc/kernel/smp_64.c | 27
-rw-r--r--  arch/sparc/kernel/starfire.c | 5
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c | 2
-rw-r--r--  arch/sparc/kernel/time_32.c | 6
-rw-r--r--  arch/sparc/kernel/traps_64.c | 30
-rw-r--r--  arch/sparc/lib/memmove.S | 35
-rw-r--r--  arch/sparc/mm/init_64.c | 2
-rw-r--r--  arch/tile/Kconfig | 5
-rw-r--r--  arch/tile/kernel/pci.c | 2
-rw-r--r--  arch/tile/kernel/pci_gx.c | 2
-rw-r--r--  arch/tile/kernel/time.c | 24
-rw-r--r--  arch/um/Kconfig.um | 5
-rw-r--r--  arch/unicore32/kernel/pci.c | 9
-rw-r--r--  arch/x86/Kconfig | 68
-rw-r--r--  arch/x86/boot/compressed/aslr.c | 35
-rw-r--r--  arch/x86/boot/compressed/head_32.S | 3
-rw-r--r--  arch/x86/boot/compressed/head_64.S | 5
-rw-r--r--  arch/x86/boot/compressed/misc.c | 6
-rw-r--r--  arch/x86/boot/compressed/misc.h | 4
-rw-r--r--  arch/x86/boot/string.c | 2
-rw-r--r--  arch/x86/boot/video-mode.c | 4
-rw-r--r--  arch/x86/boot/video.c | 2
-rw-r--r--  arch/x86/boot/video.h | 1
-rw-r--r--  arch/x86/configs/i386_defconfig | 2
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 2
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 4
-rw-r--r--  arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 2
-rw-r--r--  arch/x86/crypto/twofish-x86_64-asm_64.S | 4
-rw-r--r--  arch/x86/ia32/Makefile | 1
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 19
-rw-r--r--  arch/x86/ia32/ia32entry.S | 485
-rw-r--r--  arch/x86/ia32/nosyscall.c | 7
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 14
-rw-r--r--  arch/x86/ia32/syscall_ia32.c | 25
-rw-r--r--  arch/x86/include/asm/alternative-asm.h | 53
-rw-r--r--  arch/x86/include/asm/alternative.h | 73
-rw-r--r--  arch/x86/include/asm/apic.h | 3
-rw-r--r--  arch/x86/include/asm/barrier.h | 6
-rw-r--r--  arch/x86/include/asm/calling.h | 284
-rw-r--r--  arch/x86/include/asm/compat.h | 2
-rw-r--r--  arch/x86/include/asm/cpu.h | 2
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 42
-rw-r--r--  arch/x86/include/asm/desc.h | 7
-rw-r--r--  arch/x86/include/asm/dwarf2.h | 24
-rw-r--r--  arch/x86/include/asm/e820.h | 8
-rw-r--r--  arch/x86/include/asm/efi.h | 6
-rw-r--r--  arch/x86/include/asm/elf.h | 11
-rw-r--r--  arch/x86/include/asm/fpu-internal.h | 130
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 5
-rw-r--r--  arch/x86/include/asm/insn.h | 2
-rw-r--r--  arch/x86/include/asm/iommu_table.h | 11
-rw-r--r--  arch/x86/include/asm/irqflags.h | 49
-rw-r--r--  arch/x86/include/asm/jump_label.h | 5
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 28
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 2
-rw-r--r--  arch/x86/include/asm/livepatch.h | 4
-rw-r--r--  arch/x86/include/asm/mce.h | 16
-rw-r--r--  arch/x86/include/asm/microcode.h | 73
-rw-r--r--  arch/x86/include/asm/microcode_intel.h | 13
-rw-r--r--  arch/x86/include/asm/mwait.h | 8
-rw-r--r--  arch/x86/include/asm/page_types.h | 4
-rw-r--r--  arch/x86/include/asm/paravirt.h | 13
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 8
-rw-r--r--  arch/x86/include/asm/pci_x86.h | 2
-rw-r--r--  arch/x86/include/asm/pgalloc.h | 8
-rw-r--r--  arch/x86/include/asm/pgtable-2level_types.h | 1
-rw-r--r--  arch/x86/include/asm/pgtable-3level_types.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable.h | 8
-rw-r--r--  arch/x86/include/asm/pgtable_64_types.h | 1
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 4
-rw-r--r--  arch/x86/include/asm/pm-trace.h (renamed from arch/x86/include/asm/resume-trace.h) | 10
-rw-r--r--  arch/x86/include/asm/processor.h | 110
-rw-r--r--  arch/x86/include/asm/ptrace.h | 45
-rw-r--r--  arch/x86/include/asm/pvclock.h | 1
-rw-r--r--  arch/x86/include/asm/segment.h | 289
-rw-r--r--  arch/x86/include/asm/setup.h | 5
-rw-r--r--  arch/x86/include/asm/sigcontext.h | 6
-rw-r--r--  arch/x86/include/asm/sighandling.h | 4
-rw-r--r--  arch/x86/include/asm/smap.h | 30
-rw-r--r--  arch/x86/include/asm/smp.h | 3
-rw-r--r--  arch/x86/include/asm/special_insns.h | 24
-rw-r--r--  arch/x86/include/asm/thread_info.h | 74
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 2
-rw-r--r--  arch/x86/include/uapi/asm/bootparam.h | 2
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 18
-rw-r--r--  arch/x86/include/uapi/asm/ptrace-abi.h | 16
-rw-r--r--  arch/x86/include/uapi/asm/ptrace.h | 13
-rw-r--r--  arch/x86/include/uapi/asm/sigcontext.h | 21
-rw-r--r--  arch/x86/include/uapi/asm/vmx.h | 1
-rw-r--r--  arch/x86/kernel/Makefile | 1
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 25
-rw-r--r--  arch/x86/kernel/alternative.c | 163
-rw-r--r--  arch/x86/kernel/apic/apic.c | 62
-rw-r--r--  arch/x86/kernel/apic/apic_numachip.c | 22
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 8
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 89
-rw-r--r--  arch/x86/kernel/asm-offsets_32.c | 2
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c | 1
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 3
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 9
-rw-r--r--  arch/x86/kernel/cpu/common.c | 126
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 715
-rw-r--r--  arch/x86/kernel/cpu/intel_pt.h | 131
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-internal.h | 11
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-severity.c | 66
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 154
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 11
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c | 63
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 1
-rw-r--r--  arch/x86/kernel/cpu/microcode/core_early.c | 75
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 4
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel_early.c | 345
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel_lib.c | 22
-rw-r--r--  arch/x86/kernel/cpu/mkcapflags.sh | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 223
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 167
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 9
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 918
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_bts.c | 525
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_cqm.c | 1379
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 31
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c | 321
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_pt.c | 1103
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c | 3
-rw-r--r--  arch/x86/kernel/cpu/scattered.c | 1
-rw-r--r--  arch/x86/kernel/crash.c | 2
-rw-r--r--  arch/x86/kernel/devicetree.c | 4
-rw-r--r--  arch/x86/kernel/dumpstack.c | 15
-rw-r--r--  arch/x86/kernel/dumpstack_32.c | 13
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 11
-rw-r--r--  arch/x86/kernel/e820.c | 2
-rw-r--r--  arch/x86/kernel/early_printk.c | 32
-rw-r--r--  arch/x86/kernel/entry_32.S | 93
-rw-r--r--  arch/x86/kernel/entry_64.S | 972
-rw-r--r--  arch/x86/kernel/head64.c | 3
-rw-r--r--  arch/x86/kernel/head_32.S | 3
-rw-r--r--  arch/x86/kernel/head_64.S | 6
-rw-r--r--  arch/x86/kernel/i387.c | 56
-rw-r--r--  arch/x86/kernel/ioport.c | 2
-rw-r--r--  arch/x86/kernel/irq.c | 4
-rw-r--r--  arch/x86/kernel/irq_32.c | 2
-rw-r--r--  arch/x86/kernel/irq_64.c | 2
-rw-r--r--  arch/x86/kernel/irqinit.c | 3
-rw-r--r--  arch/x86/kernel/kgdb.c | 6
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 13
-rw-r--r--  arch/x86/kernel/kvm.c | 2
-rw-r--r--  arch/x86/kernel/module.c | 3
-rw-r--r--  arch/x86/kernel/paravirt.c | 6
-rw-r--r--  arch/x86/kernel/perf_regs.c | 40
-rw-r--r--  arch/x86/kernel/process.c | 106
-rw-r--r--  arch/x86/kernel/process_32.c | 27
-rw-r--r--  arch/x86/kernel/process_64.c | 24
-rw-r--r--  arch/x86/kernel/ptrace.c | 12
-rw-r--r--  arch/x86/kernel/pvclock.c | 44
-rw-r--r--  arch/x86/kernel/reboot.c | 10
-rw-r--r--  arch/x86/kernel/relocate_kernel_32.S | 8
-rw-r--r--  arch/x86/kernel/relocate_kernel_64.S | 16
-rw-r--r--  arch/x86/kernel/setup.c | 23
-rw-r--r--  arch/x86/kernel/signal.c | 52
-rw-r--r--  arch/x86/kernel/smpboot.c | 77
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 30
-rw-r--r--  arch/x86/kernel/syscall_32.c | 16
-rw-r--r--  arch/x86/kernel/test_rodata.c | 2
-rw-r--r--  arch/x86/kernel/time.c | 2
-rw-r--r--  arch/x86/kernel/traps.c | 58
-rw-r--r--  arch/x86/kernel/uprobes.c | 2
-rw-r--r--  arch/x86/kernel/vm86_32.c | 4
-rw-r--r--  arch/x86/kernel/vsyscall_gtod.c | 24
-rw-r--r--  arch/x86/kernel/xsave.c | 46
-rw-r--r--  arch/x86/kvm/Makefile | 2
-rw-r--r--  arch/x86/kvm/cpuid.c | 33
-rw-r--r--  arch/x86/kvm/cpuid.h | 8
-rw-r--r--  arch/x86/kvm/emulate.c | 193
-rw-r--r--  arch/x86/kvm/i8254.c | 14
-rw-r--r--  arch/x86/kvm/i8254.h | 2
-rw-r--r--  arch/x86/kvm/i8259.c | 13
-rw-r--r--  arch/x86/kvm/ioapic.c | 26
-rw-r--r--  arch/x86/kvm/ioapic.h | 11
-rw-r--r--  arch/x86/kvm/irq.h | 2
-rw-r--r--  arch/x86/kvm/lapic.c | 150
-rw-r--r--  arch/x86/kvm/lapic.h | 17
-rw-r--r--  arch/x86/kvm/mmu.c | 73
-rw-r--r--  arch/x86/kvm/pmu.c | 2
-rw-r--r--  arch/x86/kvm/svm.c | 43
-rw-r--r--  arch/x86/kvm/vmx.c | 164
-rw-r--r--  arch/x86/kvm/x86.c | 172
-rw-r--r--  arch/x86/lguest/boot.c | 4
-rw-r--r--  arch/x86/lib/atomic64_cx8_32.S | 50
-rw-r--r--  arch/x86/lib/checksum_32.S | 64
-rw-r--r--  arch/x86/lib/clear_page_64.S | 66
-rw-r--r--  arch/x86/lib/copy_page_64.S | 37
-rw-r--r--  arch/x86/lib/copy_user_64.S | 46
-rw-r--r--  arch/x86/lib/csum-copy_64.S | 2
-rw-r--r--  arch/x86/lib/insn.c | 13
-rw-r--r--  arch/x86/lib/memcpy_64.S | 68
-rw-r--r--  arch/x86/lib/memmove_64.S | 19
-rw-r--r--  arch/x86/lib/memset_64.S | 61
-rw-r--r--  arch/x86/lib/msr-reg.S | 24
-rw-r--r--  arch/x86/lib/rwsem.S | 44
-rw-r--r--  arch/x86/lib/thunk_32.S | 18
-rw-r--r--  arch/x86/lib/thunk_64.S | 28
-rw-r--r--  arch/x86/lib/usercopy_64.c | 15
-rw-r--r--  arch/x86/lib/x86-opcode-map.txt | 9
-rw-r--r--  arch/x86/mm/Makefile | 2
-rw-r--r--  arch/x86/mm/fault.c | 8
-rw-r--r--  arch/x86/mm/init.c | 69
-rw-r--r--  arch/x86/mm/init_64.c | 14
-rw-r--r--  arch/x86/mm/ioremap.c | 23
-rw-r--r--  arch/x86/mm/memtest.c | 118
-rw-r--r--  arch/x86/mm/mmap.c | 38
-rw-r--r--  arch/x86/mm/numa.c | 11
-rw-r--r--  arch/x86/mm/pageattr.c | 4
-rw-r--r--  arch/x86/mm/pat.c | 6
-rw-r--r--  arch/x86/mm/pgtable.c | 160
-rw-r--r--  arch/x86/oprofile/backtrace.c | 2
-rw-r--r--  arch/x86/pci/common.c | 36
-rw-r--r--  arch/x86/pci/intel_mid_pci.c | 4
-rw-r--r--  arch/x86/pci/irq.c | 15
-rw-r--r--  arch/x86/platform/efi/efi-bgrt.c | 4
-rw-r--r--  arch/x86/platform/efi/efi.c | 17
-rw-r--r--  arch/x86/platform/efi/efi_32.c | 22
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 29
-rw-r--r--  arch/x86/platform/intel-quark/imr_selftest.c | 10
-rw-r--r--  arch/x86/platform/olpc/olpc-xo1-sci.c | 4
-rw-r--r--  arch/x86/platform/olpc/olpc-xo15-sci.c | 4
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 6
-rw-r--r--  arch/x86/power/cpu.c | 2
-rw-r--r--  arch/x86/syscalls/syscall_32.tbl | 4
-rw-r--r--  arch/x86/syscalls/syscall_64.tbl | 2
-rw-r--r--  arch/x86/um/asm/barrier.h | 4
-rw-r--r--  arch/x86/um/sys_call_table_64.c | 2
-rw-r--r--  arch/x86/vdso/Makefile | 4
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 34
-rw-r--r--  arch/x86/vdso/vdso32/sigreturn.S | 1
-rw-r--r--  arch/x86/vdso/vdso32/syscall.S | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 1
-rw-r--r--  arch/x86/xen/mmu.c | 14
-rw-r--r--  arch/x86/xen/p2m.c | 10
-rw-r--r--  arch/x86/xen/smp.c | 60
-rw-r--r--  arch/x86/xen/suspend.c | 11
-rw-r--r--  arch/x86/xen/xen-asm_64.S | 8
-rw-r--r--  arch/xtensa/kernel/pci.c | 15
-rw-r--r--  arch/xtensa/kernel/signal.c | 5
643 files changed, 15878 insertions, 13805 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 05d7a8a..e106898 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -446,6 +446,9 @@ config HAVE_IRQ_TIME_ACCOUNTING
config HAVE_ARCH_TRANSPARENT_HUGEPAGE
bool
+config HAVE_ARCH_HUGE_VMAP
+ bool
+
config HAVE_ARCH_SOFT_DIRTY
bool
@@ -484,6 +487,18 @@ config HAVE_IRQ_EXIT_ON_IRQ_STACK
This spares a stack switch and improves cache usage on softirq
processing.
+config PGTABLE_LEVELS
+ int
+ default 2
+
+config ARCH_HAS_ELF_RANDOMIZE
+ bool
+ help
+ An architecture supports choosing randomized locations for
+ stack, mmap, brk, and ET_DYN. Defined functions:
+ - arch_mmap_rnd()
+ - arch_randomize_brk()
+
#
# ABI hall of shame
#
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index b7ff9a3..bf9e9d3 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -76,6 +76,10 @@ config GENERIC_ISA_DMA
bool
default y
+config PGTABLE_LEVELS
+ int
+ default 3
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 98a1525..82f738e 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -338,6 +338,8 @@ common_init_pci(void)
bus = pci_scan_root_bus(NULL, next_busno, alpha_mv.pci_ops,
hose, &resources);
+ if (!bus)
+ continue;
hose->bus = bus;
hose->need_domain_info = need_domain_info;
next_busno = bus->busn_res.end + 1;
@@ -353,6 +355,11 @@ common_init_pci(void)
pci_assign_unassigned_resources();
pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
+ for (hose = hose_head; hose; hose = hose->next) {
+ bus = hose->bus;
+ if (bus)
+ pci_bus_add_devices(bus);
+ }
}
diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
index c8d284d..f535a3f 100644
--- a/arch/alpha/kernel/rtc.c
+++ b/arch/alpha/kernel/rtc.c
@@ -116,7 +116,7 @@ alpha_rtc_set_time(struct device *dev, struct rtc_time *tm)
}
static int
-alpha_rtc_set_mmss(struct device *dev, unsigned long nowtime)
+alpha_rtc_set_mmss(struct device *dev, time64_t nowtime)
{
int retval = 0;
int real_seconds, real_minutes, cmos_minutes;
@@ -211,7 +211,7 @@ alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
static const struct rtc_class_ops alpha_rtc_ops = {
.read_time = alpha_rtc_read_time,
.set_time = alpha_rtc_set_time,
- .set_mmss = alpha_rtc_set_mmss,
+ .set_mmss64 = alpha_rtc_set_mmss,
.ioctl = alpha_rtc_ioctl,
};
@@ -276,7 +276,7 @@ do_remote_mmss(void *data)
}
static int
-remote_set_mmss(struct device *dev, unsigned long now)
+remote_set_mmss(struct device *dev, time64_t now)
{
union remote_data x;
if (smp_processor_id() != boot_cpuid) {
@@ -290,7 +290,7 @@ remote_set_mmss(struct device *dev, unsigned long now)
static const struct rtc_class_ops remote_rtc_ops = {
.read_time = remote_read_time,
.set_time = remote_set_time,
- .set_mmss = remote_set_mmss,
+ .set_mmss64 = remote_set_mmss,
.ioctl = alpha_rtc_ioctl,
};
#endif
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 837c0fa..700686d 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -207,6 +207,9 @@ nautilus_init_pci(void)
/* Scan our single hose. */
bus = pci_scan_bus(0, alpha_mv.pci_ops, hose);
+ if (!bus)
+ return;
+
hose->bus = bus;
pcibios_claim_one_bus(bus);
@@ -253,6 +256,7 @@ nautilus_init_pci(void)
for the root bus, so just clear it. */
bus->self = NULL;
pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
+ pci_bus_add_devices(bus);
}
/*
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 98c00a2..f46efd1 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -155,8 +155,6 @@ int copy_thread(unsigned long clone_flags,
*/
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
{
- set_fs(USER_DS); /* user space */
-
regs->sp = usp;
regs->ret = pc;
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 114234e..edda76f 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sigset_t *set)
{
int err;
- err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+ err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
sizeof(sf->uc.uc_mcontext.regs.scratch));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
if (!err)
set_current_blocked(&set);
- err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+ err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
/* Don't restart from sigreturn */
syscall_wont_restart(regs);
+ /*
+ * Ensure that sigreturn always returns to user mode (in case the
+ * regs saved on user stack got fudged between save and sigreturn)
+ * Otherwise it is easy to panic the kernel with a custom
+ * signal handler and/or restorer which clobberes the status32/ret
+ * to return to a bogus location in kernel mode.
+ */
+ regs->status32 |= STATUS_U_MASK;
+
return regs->r0;
badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
/*
* handler returns using sigreturn stub provided already by userpsace
+ * If not, nuke the process right away
*/
- BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
+ if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
+ return 1;
+
regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
/* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
- int ret;
+ int failed;
/* Set up the stack frame */
- ret = setup_rt_frame(ksig, oldset, regs);
+ failed = setup_rt_frame(ksig, oldset, regs);
- signal_setup_done(ret, ksig, 0);
+ signal_setup_done(failed, ksig, 0);
}
void do_signal(struct pt_regs *regs)
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index e550b11..93c6ea5 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -841,7 +841,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
break;
case DW_CFA_GNU_window_save:
default:
- unw_debug("UNKNOW OPCODE 0x%x\n", opcode);
+ unw_debug("UNKNOWN OPCODE 0x%x\n", opcode);
result = 0;
break;
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0a9dcde..da1266c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1,8 +1,8 @@
config ARM
bool
default y
- select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -287,6 +287,11 @@ config GENERIC_BUG
def_bool y
depends on BUG
+config PGTABLE_LEVELS
+ int
+ default 3 if ARM_LPAE
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -620,6 +625,7 @@ config ARCH_PXA
select GENERIC_CLOCKEVENTS
select GPIO_PXA
select HAVE_IDE
+ select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
select PLAT_PXA
select SPARSE_IRQ
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 1943fc3..8a099bc1 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -15,7 +15,7 @@
/ {
compatible = "ti,am4372", "ti,am43";
- interrupt-parent = <&gic>;
+ interrupt-parent = <&wakeupgen>;
aliases {
@@ -48,6 +48,15 @@
#interrupt-cells = <3>;
reg = <0x48241000 0x1000>,
<0x48240100 0x0100>;
+ interrupt-parent = <&gic>;
+ };
+
+ wakeupgen: interrupt-controller@48281000 {
+ compatible = "ti,omap4-wugen-mpu";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x48281000 0x1000>;
+ interrupt-parent = <&gic>;
};
l2-cache-controller@48242000 {
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index f84d971..26956cb 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -352,7 +352,6 @@
reg = <0x24>;
compatible = "ti,tps65218";
interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* NMIn */
- interrupt-parent = <&gic>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 832d243..8ae29c9 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -392,7 +392,6 @@
tps@24 {
compatible = "ti,tps65218";
reg = <0x24>;
- interrupt-parent = <&gic>;
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 257c099..1d71091 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -369,7 +369,6 @@
reg = <0x24>;
compatible = "ti,tps65218";
interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* NMIn */
- interrupt-parent = <&gic>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 6463f9e..bd48dba 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -454,7 +454,6 @@
mcp_rtc: rtc@6f {
compatible = "microchip,mcp7941x";
reg = <0x6f>;
- interrupt-parent = <&gic>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_LOW>; /* IRQ_SYS_1N */
pinctrl-names = "default";
@@ -477,7 +476,7 @@
&uart3 {
status = "okay";
- interrupts-extended = <&gic GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
<&dra7_pmx_core 0x248>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index d3a29c1..afe678f 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -36,6 +36,20 @@
>;
};
+ mmc_pins: pinmux_mmc_pins {
+ pinctrl-single,pins = <
+ DM816X_IOPAD(0x0a70, MUX_MODE0) /* SD_POW */
+ DM816X_IOPAD(0x0a74, MUX_MODE0) /* SD_CLK */
+ DM816X_IOPAD(0x0a78, MUX_MODE0) /* SD_CMD */
+ DM816X_IOPAD(0x0a7C, MUX_MODE0) /* SD_DAT0 */
+ DM816X_IOPAD(0x0a80, MUX_MODE0) /* SD_DAT1 */
+ DM816X_IOPAD(0x0a84, MUX_MODE0) /* SD_DAT2 */
+ DM816X_IOPAD(0x0a88, MUX_MODE0) /* SD_DAT2 */
+ DM816X_IOPAD(0x0a8c, MUX_MODE2) /* GP1[7] */
+ DM816X_IOPAD(0x0a90, MUX_MODE2) /* GP1[8] */
+ >;
+ };
+
usb0_pins: pinmux_usb0_pins {
pinctrl-single,pins = <
DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */
@@ -137,7 +151,12 @@
};
&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc_pins>;
vmmc-supply = <&vmmcsd_fixed>;
+ bus-width = <4>;
+ cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
};
/* At least dm8168-evm rev c won't support multipoint, later may */
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3c97b5f..f35715b 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -150,17 +150,27 @@
};
gpio1: gpio@48032000 {
- compatible = "ti,omap3-gpio";
+ compatible = "ti,omap4-gpio";
ti,hwmods = "gpio1";
+ ti,gpio-always-on;
reg = <0x48032000 0x1000>;
- interrupts = <97>;
+ interrupts = <96>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
gpio2: gpio@4804c000 {
- compatible = "ti,omap3-gpio";
+ compatible = "ti,omap4-gpio";
ti,hwmods = "gpio2";
+ ti,gpio-always-on;
reg = <0x4804c000 0x1000>;
- interrupts = <99>;
+ interrupts = <98>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
gpmc: gpmc@50000000 {
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 7563d7c..b1bd06c 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -444,7 +444,7 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>;
- interrupts-extended = <&gic GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
<&dra7_pmx_core 0x3e0>;
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 127608d..a0afce7 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -13,14 +13,13 @@
#include "skeleton.dtsi"
#define MAX_SOURCES 400
-#define DIRECT_IRQ(irq) (MAX_SOURCES + irq)
/ {
#address-cells = <1>;
#size-cells = <1>;
compatible = "ti,dra7xx";
- interrupt-parent = <&gic>;
+ interrupt-parent = <&crossbar_mpu>;
aliases {
i2c0 = &i2c1;
@@ -50,18 +49,27 @@
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-parent = <&gic>;
};
gic: interrupt-controller@48211000 {
compatible = "arm,cortex-a15-gic";
interrupt-controller;
#interrupt-cells = <3>;
- arm,routable-irqs = <192>;
reg = <0x48211000 0x1000>,
<0x48212000 0x1000>,
<0x48214000 0x2000>,
<0x48216000 0x2000>;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+ interrupt-parent = <&gic>;
+ };
+
+ wakeupgen: interrupt-controller@48281000 {
+ compatible = "ti,omap5-wugen-mpu", "ti,omap4-wugen-mpu";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x48281000 0x1000>;
+ interrupt-parent = <&gic>;
};
/*
@@ -91,8 +99,8 @@
ti,hwmods = "l3_main_1", "l3_main_2";
reg = <0x44000000 0x1000000>,
<0x45000000 0x1000>;
- interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI DIRECT_IRQ(10) IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
prm: prm@4ae06000 {
compatible = "ti,dra7-prm";
@@ -344,7 +352,7 @@
uart1: serial@4806a000 {
compatible = "ti,omap4-uart";
reg = <0x4806a000 0x100>;
- interrupts-extended = <&gic GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart1";
clock-frequency = <48000000>;
status = "disabled";
@@ -355,7 +363,7 @@
uart2: serial@4806c000 {
compatible = "ti,omap4-uart";
reg = <0x4806c000 0x100>;
- interrupts-extended = <&gic GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart2";
clock-frequency = <48000000>;
status = "disabled";
@@ -366,7 +374,7 @@
uart3: serial@48020000 {
compatible = "ti,omap4-uart";
reg = <0x48020000 0x100>;
- interrupts-extended = <&gic GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart3";
clock-frequency = <48000000>;
status = "disabled";
@@ -377,7 +385,7 @@
uart4: serial@4806e000 {
compatible = "ti,omap4-uart";
reg = <0x4806e000 0x100>;
- interrupts-extended = <&gic GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart4";
clock-frequency = <48000000>;
status = "disabled";
@@ -388,7 +396,7 @@
uart5: serial@48066000 {
compatible = "ti,omap4-uart";
reg = <0x48066000 0x100>;
- interrupts-extended = <&gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart5";
clock-frequency = <48000000>;
status = "disabled";
@@ -399,7 +407,7 @@
uart6: serial@48068000 {
compatible = "ti,omap4-uart";
reg = <0x48068000 0x100>;
- interrupts-extended = <&gic GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart6";
clock-frequency = <48000000>;
status = "disabled";
@@ -410,7 +418,7 @@
uart7: serial@48420000 {
compatible = "ti,omap4-uart";
reg = <0x48420000 0x100>;
- interrupts-extended = <&gic GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart7";
clock-frequency = <48000000>;
status = "disabled";
@@ -419,7 +427,7 @@
uart8: serial@48422000 {
compatible = "ti,omap4-uart";
reg = <0x48422000 0x100>;
- interrupts-extended = <&gic GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart8";
clock-frequency = <48000000>;
status = "disabled";
@@ -428,7 +436,7 @@
uart9: serial@48424000 {
compatible = "ti,omap4-uart";
reg = <0x48424000 0x100>;
- interrupts-extended = <&gic GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart9";
clock-frequency = <48000000>;
status = "disabled";
@@ -437,7 +445,7 @@
uart10: serial@4ae2b000 {
compatible = "ti,omap4-uart";
reg = <0x4ae2b000 0x100>;
- interrupts-extended = <&gic GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart10";
clock-frequency = <48000000>;
status = "disabled";
@@ -1111,7 +1119,6 @@
"wkupclk", "refclk",
"div-clk", "phy-div";
#phy-cells = <0>;
- ti,hwmods = "pcie1-phy";
};
pcie2_phy: pciephy@4a095000 {
@@ -1130,7 +1137,6 @@
"wkupclk", "refclk",
"div-clk", "phy-div";
#phy-cells = <0>;
- ti,hwmods = "pcie2-phy";
status = "disabled";
};
};
@@ -1337,9 +1343,12 @@
status = "disabled";
};
- crossbar_mpu: crossbar@4a020000 {
+ crossbar_mpu: crossbar@4a002a48 {
compatible = "ti,irq-crossbar";
reg = <0x4a002a48 0x130>;
+ interrupt-controller;
+ interrupt-parent = <&wakeupgen>;
+ #interrupt-cells = <3>;
ti,max-irqs = <160>;
ti,max-crossbar-sources = <MAX_SOURCES>;
ti,reg-size = <2>;
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index 40ed539..daf2811 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -158,7 +158,6 @@
pinctrl-0 = <&tps65917_pins_default>;
interrupts = <GIC_SPI 2 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
- interrupt-parent = <&gic>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/dra72x.dtsi b/arch/arm/boot/dts/dra72x.dtsi
index e5a3d23..f7fb0d0 100644
--- a/arch/arm/boot/dts/dra72x.dtsi
+++ b/arch/arm/boot/dts/dra72x.dtsi
@@ -25,6 +25,7 @@
pmu {
compatible = "arm,cortex-a15-pmu";
- interrupts = <GIC_SPI DIRECT_IRQ(131) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&wakeupgen>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
};
};
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index 10173fa..00eeed7 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -41,8 +41,9 @@
pmu {
compatible = "arm,cortex-a15-pmu";
- interrupts = <GIC_SPI DIRECT_IRQ(131) IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI DIRECT_IRQ(132) IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&wakeupgen>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
};
ocp {
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index ac6b0ae..14ab515 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -131,6 +131,9 @@
pmu_system_controller: system-controller@10020000 {
compatible = "samsung,exynos3250-pmu", "syscon";
reg = <0x10020000 0x4000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
};
mipi_phy: video-phy@10020710 {
@@ -185,6 +188,7 @@
compatible = "samsung,exynos3250-rtc";
reg = <0x10070000 0x100>;
interrupts = <0 73 0>, <0 74 0>;
+ interrupt-parent = <&pmu_system_controller>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 77ea547..e20cdc2 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -154,6 +154,9 @@
pmu_system_controller: system-controller@10020000 {
compatible = "samsung,exynos4210-pmu", "syscon";
reg = <0x10020000 0x4000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
};
dsi_0: dsi@11C80000 {
@@ -266,6 +269,7 @@
rtc@10070000 {
compatible = "samsung,s3c6410-rtc";
reg = <0x10070000 0x100>;
+ interrupt-parent = <&pmu_system_controller>;
interrupts = <0 44 0>, <0 45 0>;
clocks = <&clock CLK_RTC>;
clock-names = "rtc";
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index adbde1a..77f656e 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -205,6 +205,9 @@
clock-names = "clkout16";
clocks = <&clock CLK_FIN_PLL>;
#clock-cells = <1>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
};
sysreg_system_controller: syscon@10050000 {
@@ -241,6 +244,7 @@
rtc: rtc@101E0000 {
clocks = <&clock CLK_RTC>;
clock-names = "rtc";
+ interrupt-parent = <&pmu_system_controller>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index c0e98cf..b3d2d53 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -327,6 +327,7 @@
rtc: rtc@101E0000 {
clocks = <&clock CLK_RTC>;
clock-names = "rtc";
+ interrupt-parent = <&pmu_system_controller>;
status = "disabled";
};
@@ -770,6 +771,9 @@
clock-names = "clkout16";
clocks = <&clock CLK_FIN_PLL>;
#clock-cells = <1>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
};
sysreg_system_controller: syscon@10050000 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index f4f78c4..3fdc84f 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -92,6 +92,8 @@
ti,hwmods = "aes";
reg = <0x480c5000 0x50>;
interrupts = <0>;
+ dmas = <&sdma 65 &sdma 66>;
+ dma-names = "tx", "rx";
};
prm: prm@48306000 {
@@ -550,6 +552,8 @@
ti,hwmods = "sham";
reg = <0x480c3000 0x64>;
interrupts = <49>;
+ dmas = <&sdma 69>;
+ dma-names = "rx";
};
smartreflex_core: smartreflex@480cb000 {
diff --git a/arch/arm/boot/dts/omap4-duovero.dtsi b/arch/arm/boot/dts/omap4-duovero.dtsi
index e860ccd..f2a94fa 100644
--- a/arch/arm/boot/dts/omap4-duovero.dtsi
+++ b/arch/arm/boot/dts/omap4-duovero.dtsi
@@ -173,14 +173,12 @@
twl: twl@48 {
reg = <0x48>;
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; /* IRQ_SYS_1N cascaded to gic */
- interrupt-parent = <&gic>;
};
twl6040: twl@4b {
compatible = "ti,twl6040";
reg = <0x4b>;
interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>; /* IRQ_SYS_2N cascaded to gic */
- interrupt-parent = <&gic>;
ti,audpwron-gpio = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* gpio_160 */
vio-supply = <&v1v8>;
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 1505135..7c15fb2 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -372,7 +372,6 @@
reg = <0x48>;
/* IRQ# = 7 */
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; /* IRQ_SYS_1N cascaded to gic */
- interrupt-parent = <&gic>;
};
twl6040: twl@4b {
@@ -384,7 +383,6 @@
/* IRQ# = 119 */
interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>; /* IRQ_SYS_2N cascaded to gic */
- interrupt-parent = <&gic>;
ti,audpwron-gpio = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio line 127 */
vio-supply = <&v1v8>;
@@ -479,17 +477,17 @@
};
&uart2 {
- interrupts-extended = <&gic GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH
+ interrupts-extended = <&wakeupgen GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core OMAP4_UART2_RX>;
};
&uart3 {
- interrupts-extended = <&gic GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
+ interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core OMAP4_UART3_RX>;
};
&uart4 {
- interrupts-extended = <&gic GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH
+ interrupts-extended = <&wakeupgen GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core OMAP4_UART4_RX>;
};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 3e1da43..8aca8da 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -363,7 +363,6 @@
reg = <0x48>;
/* SPI = 0, IRQ# = 7, 4 = active high level-sensitive */
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; /* IRQ_SYS_1N cascaded to gic */
- interrupt-parent = <&gic>;
};
twl6040: twl@4b {
@@ -375,7 +374,6 @@
/* SPI = 0, IRQ# = 119, 4 = active high level-sensitive */
interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>; /* IRQ_SYS_2N cascaded to gic */
- interrupt-parent = <&gic>;
ti,audpwron-gpio = <&gpio4 31 0>; /* gpio line 127 */
vio-supply = <&v1v8>;
@@ -570,21 +568,21 @@
};
&uart2 {
- interrupts-extended = <&gic GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH
+ interrupts-extended = <&wakeupgen GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core OMAP4_UART2_RX>;
pinctrl-names = "default";
pinctrl-0 = <&uart2_pins>;
};
&uart3 {
- interrupts-extended = <&gic GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
+ interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core OMAP4_UART3_RX>;
pinctrl-names = "default";
pinctrl-0 = <&uart3_pins>;
};
&uart4 {
- interrupts-extended = <&gic GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH
+ interrupts-extended = <&wakeupgen GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core OMAP4_UART4_RX>;
pinctrl-names = "default";
pinctrl-0 = <&uart4_pins>;
diff --git a/arch/arm/boot/dts/omap4-var-som-om44.dtsi b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
index 062701e..a4f1ba2 100644
--- a/arch/arm/boot/dts/omap4-var-som-om44.dtsi
+++ b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
@@ -185,7 +185,6 @@
reg = <0x48>;
/* SPI = 0, IRQ# = 7, 4 = active high level-sensitive */
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; /* IRQ_SYS_1N cascaded to gic */
- interrupt-parent = <&gic>;
};
twl6040: twl@4b {
@@ -197,7 +196,6 @@
/* SPI = 0, IRQ# = 119, 4 = active high level-sensitive */
interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>; /* IRQ_SYS_2N cascaded to gic */
- interrupt-parent = <&gic>;
ti,audpwron-gpio = <&gpio6 22 0>; /* gpio 182 */
vio-supply = <&v1v8>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 87401d9..f2091d1 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -14,7 +14,7 @@
/ {
compatible = "ti,omap4430", "ti,omap4";
- interrupt-parent = <&gic>;
+ interrupt-parent = <&wakeupgen>;
aliases {
i2c0 = &i2c1;
@@ -56,6 +56,7 @@
#interrupt-cells = <3>;
reg = <0x48241000 0x1000>,
<0x48240100 0x0100>;
+ interrupt-parent = <&gic>;
};
L2: l2-cache-controller@48242000 {
@@ -70,6 +71,15 @@
clocks = <&mpu_periphclk>;
reg = <0x48240600 0x20>;
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_HIGH)>;
+ interrupt-parent = <&gic>;
+ };
+
+ wakeupgen: interrupt-controller@48281000 {
+ compatible = "ti,omap4-wugen-mpu";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x48281000 0x1000>;
+ interrupt-parent = <&gic>;
};
/*
@@ -319,7 +329,7 @@
uart2: serial@4806c000 {
compatible = "ti,omap4-uart";
reg = <0x4806c000 0x100>;
- interrupts-extended = <&gic GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart2";
clock-frequency = <48000000>;
};
@@ -327,7 +337,7 @@
uart3: serial@48020000 {
compatible = "ti,omap4-uart";
reg = <0x48020000 0x100>;
- interrupts-extended = <&gic GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart3";
clock-frequency = <48000000>;
};
@@ -335,7 +345,7 @@
uart4: serial@4806e000 {
compatible = "ti,omap4-uart";
reg = <0x4806e000 0x100>;
- interrupts-extended = <&gic GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart4";
clock-frequency = <48000000>;
};
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index b54b271..61ad2ea 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -412,7 +412,6 @@
palmas: palmas@48 {
compatible = "ti,palmas";
interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
- interrupt-parent = <&gic>;
reg = <0x48>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 159720d..74777a6 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -311,7 +311,6 @@
palmas: palmas@48 {
compatible = "ti,palmas";
interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
- interrupt-parent = <&gic>;
reg = <0x48>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -521,7 +520,6 @@
pinctrl-0 = <&twl6040_pins>;
interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
- interrupt-parent = <&gic>;
ti,audpwron-gpio = <&gpio5 13 0>; /* gpio line 141 */
vio-supply = <&smps7_reg>;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 4a485b6..77b5f70d 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -18,7 +18,7 @@
#size-cells = <1>;
compatible = "ti,omap5";
- interrupt-parent = <&gic>;
+ interrupt-parent = <&wakeupgen>;
aliases {
i2c0 = &i2c1;
@@ -79,6 +79,7 @@
<GIC_PPI 14 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-parent = <&gic>;
};
pmu {
@@ -95,6 +96,15 @@
<0x48212000 0x1000>,
<0x48214000 0x2000>,
<0x48216000 0x2000>;
+ interrupt-parent = <&gic>;
+ };
+
+ wakeupgen: interrupt-controller@48281000 {
+ compatible = "ti,omap5-wugen-mpu", "ti,omap4-wugen-mpu";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x48281000 0x1000>;
+ interrupt-parent = <&gic>;
};
/*
@@ -458,7 +468,7 @@
uart1: serial@4806a000 {
compatible = "ti,omap4-uart";
reg = <0x4806a000 0x100>;
- interrupts-extended = <&gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart1";
clock-frequency = <48000000>;
};
@@ -466,7 +476,7 @@
uart2: serial@4806c000 {
compatible = "ti,omap4-uart";
reg = <0x4806c000 0x100>;
- interrupts-extended = <&gic GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart2";
clock-frequency = <48000000>;
};
@@ -474,7 +484,7 @@
uart3: serial@48020000 {
compatible = "ti,omap4-uart";
reg = <0x48020000 0x100>;
- interrupts-extended = <&gic GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart3";
clock-frequency = <48000000>;
};
@@ -482,7 +492,7 @@
uart4: serial@4806e000 {
compatible = "ti,omap4-uart";
reg = <0x4806e000 0x100>;
- interrupts-extended = <&gic GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart4";
clock-frequency = <48000000>;
};
@@ -490,7 +500,7 @@
uart5: serial@48066000 {
compatible = "ti,omap4-uart";
reg = <0x48066000 0x100>;
- interrupts-extended = <&gic GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart5";
clock-frequency = <48000000>;
};
@@ -498,7 +508,7 @@
uart6: serial@48068000 {
compatible = "ti,omap4-uart";
reg = <0x48068000 0x100>;
- interrupts-extended = <&gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
ti,hwmods = "uart6";
clock-frequency = <48000000>;
};
@@ -883,14 +893,12 @@
usbhsohci: ohci@4a064800 {
compatible = "ti,ohci-omap3";
reg = <0x4a064800 0x400>;
- interrupt-parent = <&gic>;
interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
};
usbhsehci: ehci@4a064c00 {
compatible = "ti,ehci-omap";
reg = <0x4a064c00 0x400>;
- interrupt-parent = <&gic>;
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
};
};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index d771f68..eccc78d 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -411,6 +411,7 @@
"mac_clk_rx", "mac_clk_tx",
"clk_mac_ref", "clk_mac_refout",
"aclk_mac", "pclk_mac";
+ status = "disabled";
};
usb_host0_ehci: usb@ff500000 {
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 9d87609..d9176e6 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -660,7 +660,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0xfff01000 0x1000>;
- interrupts = <0 156 4>;
+ interrupts = <0 155 4>;
num-cs = <4>;
clocks = <&spi_m_clk>;
status = "disabled";
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
index ea28eba..eeb7afe 100644
--- a/arch/arm/boot/dts/stih416.dtsi
+++ b/arch/arm/boot/dts/stih416.dtsi
@@ -10,7 +10,7 @@
#include "stih416-clock.dtsi"
#include "stih416-pinctrl.dtsi"
-#include <dt-bindings/phy/phy-miphy365x.h>
+#include <dt-bindings/phy/phy.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/reset-controller/stih416-resets.h>
/ {
@@ -306,7 +306,7 @@
reg = <0xfe380000 0x1000>;
interrupts = <GIC_SPI 157 IRQ_TYPE_NONE>;
interrupt-names = "hostc";
- phys = <&phy_port0 MIPHY_TYPE_SATA>;
+ phys = <&phy_port0 PHY_TYPE_SATA>;
phy-names = "sata-phy";
resets = <&powerdown STIH416_SATA0_POWERDOWN>,
<&softreset STIH416_SATA0_SOFTRESET>;
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index ab7891c..75742f8 100644
--- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
@@ -56,6 +56,22 @@
model = "Olimex A10-OLinuXino-LIME";
compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";
+ cpus {
+ cpu0: cpu@0 {
+ /*
+ * The A10-Lime is known to be unstable
+ * when running at 1008 MHz
+ */
+ operating-points = <
+ /* kHz uV */
+ 912000 1350000
+ 864000 1300000
+ 624000 1250000
+ >;
+ cooling-max-level = <2>;
+ };
+ };
+
soc@01c00000 {
emac: ethernet@01c0b000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 5c29258..eebb785 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -75,7 +75,6 @@
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
- 1056000 1500000
1008000 1400000
912000 1350000
864000 1300000
@@ -83,7 +82,7 @@
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
- cooling-max-level = <4>;
+ cooling-max-level = <3>;
};
};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index f8818f1..883cb4873 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -47,7 +47,6 @@
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
- 1104000 1500000
1008000 1400000
912000 1350000
864000 1300000
@@ -57,7 +56,7 @@
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
- cooling-max-level = <6>;
+ cooling-max-level = <5>;
};
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 3a8530b..fdd1817 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -105,7 +105,6 @@
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
- 1008000 1450000
960000 1400000
912000 1400000
864000 1300000
@@ -116,7 +115,7 @@
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
- cooling-max-level = <7>;
+ cooling-max-level = <6>;
};
cpu@1 {
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 4296b53..f58a3d9 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -8,7 +8,7 @@
/ {
compatible = "nvidia,tegra114";
- interrupt-parent = <&gic>;
+ interrupt-parent = <&lic>;
host1x@50000000 {
compatible = "nvidia,tegra114-host1x", "simple-bus";
@@ -134,6 +134,19 @@
<0x50046000 0x2000>;
interrupts = <GIC_PPI 9
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ interrupt-parent = <&gic>;
+ };
+
+ lic: interrupt-controller@60004000 {
+ compatible = "nvidia,tegra114-ictlr", "nvidia,tegra30-ictlr";
+ reg = <0x60004000 0x100>,
+ <0x60004100 0x50>,
+ <0x60004200 0x50>,
+ <0x60004300 0x50>,
+ <0x60004400 0x50>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
};
timer@60005000 {
@@ -766,5 +779,6 @@
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-parent = <&gic>;
};
};
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 4be06c6..db85695 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -10,7 +10,7 @@
/ {
compatible = "nvidia,tegra124";
- interrupt-parent = <&gic>;
+ interrupt-parent = <&lic>;
#address-cells = <2>;
#size-cells = <2>;
@@ -173,6 +173,7 @@
<0x0 0x50046000 0x0 0x2000>;
interrupts = <GIC_PPI 9
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ interrupt-parent = <&gic>;
};
gpu@0,57000000 {
@@ -190,6 +191,18 @@
status = "disabled";
};
+ lic: interrupt-controller@60004000 {
+ compatible = "nvidia,tegra124-ictlr", "nvidia,tegra30-ictlr";
+ reg = <0x0 0x60004000 0x0 0x100>,
+ <0x0 0x60004100 0x0 0x100>,
+ <0x0 0x60004200 0x0 0x100>,
+ <0x0 0x60004300 0x0 0x100>,
+ <0x0 0x60004400 0x0 0x100>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ };
+
timer@0,60005000 {
compatible = "nvidia,tegra124-timer", "nvidia,tegra20-timer";
reg = <0x0 0x60005000 0x0 0x400>;
@@ -955,5 +968,6 @@
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-parent = <&gic>;
};
};
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index e5527f7..adf6b04 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -7,7 +7,7 @@
/ {
compatible = "nvidia,tegra20";
- interrupt-parent = <&intc>;
+ interrupt-parent = <&lic>;
host1x@50000000 {
compatible = "nvidia,tegra20-host1x", "simple-bus";
@@ -142,6 +142,7 @@
timer@50040600 {
compatible = "arm,cortex-a9-twd-timer";
+ interrupt-parent = <&intc>;
reg = <0x50040600 0x20>;
interrupts = <GIC_PPI 13
(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
@@ -154,6 +155,7 @@
0x50040100 0x0100>;
interrupt-controller;
#interrupt-cells = <3>;
+ interrupt-parent = <&intc>;
};
cache-controller@50043000 {
@@ -165,6 +167,17 @@
cache-level = <2>;
};
+ lic: interrupt-controller@60004000 {
+ compatible = "nvidia,tegra20-ictlr";
+ reg = <0x60004000 0x100>,
+ <0x60004100 0x50>,
+ <0x60004200 0x50>,
+ <0x60004300 0x50>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&intc>;
+ };
+
timer@60005000 {
compatible = "nvidia,tegra20-timer";
reg = <0x60005000 0x60>;
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index db4810d..60e205a 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -8,7 +8,7 @@
/ {
compatible = "nvidia,tegra30";
- interrupt-parent = <&intc>;
+ interrupt-parent = <&lic>;
pcie-controller@00003000 {
compatible = "nvidia,tegra30-pcie";
@@ -228,6 +228,7 @@
timer@50040600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x50040600 0x20>;
+ interrupt-parent = <&intc>;
interrupts = <GIC_PPI 13
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
clocks = <&tegra_car TEGRA30_CLK_TWD>;
@@ -239,6 +240,7 @@
0x50040100 0x0100>;
interrupt-controller;
#interrupt-cells = <3>;
+ interrupt-parent = <&intc>;
};
cache-controller@50043000 {
@@ -250,6 +252,18 @@
cache-level = <2>;
};
+ lic: interrupt-controller@60004000 {
+ compatible = "nvidia,tegra30-ictlr";
+ reg = <0x60004000 0x100>,
+ <0x60004100 0x50>,
+ <0x60004200 0x50>,
+ <0x60004300 0x50>,
+ <0x60004400 0x50>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&intc>;
+ };
+
timer@60005000 {
compatible = "nvidia,tegra30-timer", "nvidia,tegra20-timer";
reg = <0x60005000 0x400>;
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index 6eaddc4..37dc0fe 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -151,8 +151,6 @@ static int bL_switch_to(unsigned int new_cluster_id)
unsigned int mpidr, this_cpu, that_cpu;
unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
struct completion inbound_alive;
- struct tick_device *tdev;
- enum clock_event_mode tdev_mode;
long volatile *handshake_ptr;
int ipi_nr, ret;
@@ -219,13 +217,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
/* redirect GIC's SGIs to our counterpart */
gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
- tdev = tick_get_device(this_cpu);
- if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
- tdev = NULL;
- if (tdev) {
- tdev_mode = tdev->evtdev->mode;
- clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
- }
+ tick_suspend_local();
ret = cpu_pm_enter();
@@ -251,11 +243,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
ret = cpu_pm_exit();
- if (tdev) {
- clockevents_set_mode(tdev->evtdev, tdev_mode);
- clockevents_program_event(tdev->evtdev,
- tdev->evtdev->next_event, 1);
- }
+ tick_resume_local();
trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
local_fiq_enable();
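
For reference, the bracketing pattern the switcher now relies on, reduced to a self-contained sketch (the function name is hypothetical and the actual cluster switch is elided):

#include <linux/tick.h>
#include <linux/cpu_pm.h>

/* Hypothetical sketch: bracket a power transition with the generic tick API. */
static int foo_power_transition(void)
{
	int ret;

	tick_suspend_local();		/* shut this CPU's tick device down */

	ret = cpu_pm_enter();
	if (!ret) {
		/* ... perform the low-power transition here ... */
		ret = cpu_pm_exit();
	}

	tick_resume_local();		/* reprogram and restart the tick */
	return ret;
}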
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
index 71e5fc7..1d1800f 100644
--- a/arch/arm/crypto/aesbs-core.S_shipped
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -58,14 +58,18 @@
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ 7
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
#endif
#ifdef __thumb__
# define adrl adr
#endif
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
+.fpu neon
+
.text
.syntax unified @ ARMv7-capable assembler is expected to handle this
#ifdef __thumb2__
@@ -74,8 +78,6 @@
.code 32
#endif
-.fpu neon
-
.type _bsaes_decrypt8,%function
.align 4
_bsaes_decrypt8:
@@ -2095,9 +2097,11 @@ bsaes_xts_decrypt:
vld1.8 {q8}, [r0] @ initial tweak
adr r2, .Lxts_magic
+#ifndef XTS_CHAIN_TWEAK
tst r9, #0xf @ if not multiple of 16
it ne @ Thumb2 thing, sanity check in ARM
subne r9, #0x10 @ subtract another 16 bytes
+#endif
subs r9, #0x80
blo .Lxts_dec_short
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
index be068db..a4d3856 100644
--- a/arch/arm/crypto/bsaes-armv7.pl
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -701,14 +701,18 @@ $code.=<<___;
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ 7
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
#endif
#ifdef __thumb__
# define adrl adr
#endif
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
+.fpu neon
+
.text
.syntax unified @ ARMv7-capable assembler is expected to handle this
#ifdef __thumb2__
@@ -717,8 +721,6 @@ $code.=<<___;
.code 32
#endif
-.fpu neon
-
.type _bsaes_decrypt8,%function
.align 4
_bsaes_decrypt8:
@@ -2076,9 +2078,11 @@ bsaes_xts_decrypt:
vld1.8 {@XMM[8]}, [r0] @ initial tweak
adr $magic, .Lxts_magic
+#ifndef XTS_CHAIN_TWEAK
tst $len, #0xf @ if not multiple of 16
it ne @ Thumb2 thing, sanity check in ARM
subne $len, #0x10 @ subtract another 16 bytes
+#endif
subs $len, #0x80
blo .Lxts_dec_short
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index af319ac..0f84249 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -1,6 +1,8 @@
#ifndef __ASM_ARM_CPUIDLE_H
#define __ASM_ARM_CPUIDLE_H
+#include <asm/proc-fns.h>
+
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
@@ -25,4 +27,25 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
*/
#define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX)
+struct device_node;
+
+struct cpuidle_ops {
+ int (*suspend)(int cpu, unsigned long arg);
+ int (*init)(struct device_node *, int cpu);
+};
+
+struct of_cpuidle_method {
+ const char *method;
+ struct cpuidle_ops *ops;
+};
+
+#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
+ static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
+ __used __section(__cpuidle_method_of_table) \
+ = { .method = _method, .ops = _ops }
+
+extern int arm_cpuidle_suspend(int index);
+
+extern int arm_cpuidle_init(int cpu);
+
#endif
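
For illustration, a platform plugs into this table roughly as follows; the "vendor,foo" method string and both callbacks are hypothetical, and only the init callback may live in init memory since suspend is called at run time:

#include <linux/init.h>
#include <linux/of.h>
#include <asm/cpuidle.h>

static int foo_suspend(int cpu, unsigned long arg)
{
	/* enter the platform low-power state identified by arg */
	return 0;
}

static int __init foo_init(struct device_node *dn, int cpu)
{
	/* probe/verify the per-cpu power controller */
	return 0;
}

static struct cpuidle_ops foo_cpuidle_ops __initdata = {
	.suspend = foo_suspend,
	.init = foo_init,
};

CPUIDLE_METHOD_OF_DECLARE(foo, "vendor,foo", &foo_cpuidle_ops);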
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index e2f4781..d2315ff 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -127,10 +127,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
extern void elf_set_personality(const struct elf32_hdr *);
#define SET_PERSONALITY(ex) elf_set_personality(&(ex))
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
#ifdef CONFIG_MMU
#ifdef CONFIG_VDSO
#define ARCH_DLINFO \
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 70f9b9b..5f337dc 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -1,7 +1,7 @@
#ifndef _ASM_ARM_JUMP_LABEL_H
#define _ASM_ARM_JUMP_LABEL_H
-#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -27,8 +27,6 @@ l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
typedef u32 jump_label_t;
struct jump_entry {
@@ -37,4 +35,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 816db0b..d995821 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -185,6 +185,7 @@
#define HSR_COND (0xfU << HSR_COND_SHIFT)
#define FSC_FAULT (0x04)
+#define FSC_ACCESS (0x08)
#define FSC_PERM (0x0c)
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 41008cd..d71607c 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -27,6 +27,8 @@
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
#else
@@ -165,19 +167,10 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
/* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
- unsigned long end)
-{
- return 0;
-}
-
-static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return 0;
-}
-
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
index 3f83db2..d8e90c8 100644
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -28,28 +28,6 @@ struct kvm_decode {
bool sign_extend;
};
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
- phys_addr_t phys_addr;
- u8 data[8];
- u32 len;
- bool is_write;
- void *private;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- run->mmio.phys_addr = mmio->phys_addr;
- run->mmio.len = mmio->len;
- run->mmio.is_write = mmio->is_write;
- memcpy(run->mmio.data, mmio->data, mmio->len);
- run->exit_reason = KVM_EXIT_MMIO;
-}
-
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index bf0fe99..4cf48c3 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -149,29 +149,28 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
+#define kvm_pgd_index(addr) pgd_index(addr)
+
static inline bool kvm_page_empty(void *ptr)
{
struct page *ptr_page = virt_to_page(ptr);
return page_count(ptr_page) == 1;
}
-
#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) (0)
#define KVM_PREALLOC_LEVEL 0
-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
- return 0;
+ return kvm->arch.pgd;
}
-static inline void kvm_free_hwpgd(struct kvm *kvm) { }
-
-static inline void *kvm_get_hwpgd(struct kvm *kvm)
+static inline unsigned int kvm_get_hwpgd_size(void)
{
- return kvm->arch.pgd;
+ return PTRS_PER_S2_PGD * sizeof(pgd_t);
}
struct kvm;
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 90c12e1..0f79e4d 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -12,8 +12,7 @@
extern void timer_tick(void);
-struct timespec;
-typedef void (*clock_access_fn)(struct timespec *);
+typedef void (*clock_access_fn)(struct timespec64 *);
extern int register_persistent_clock(clock_access_fn read_boot,
clock_access_fn read_persistent);
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 0db25bc..2499867 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -198,6 +198,9 @@ struct kvm_arch_memory_slot {
/* Highest supported SPI, from VGIC_NR_IRQS */
#define KVM_ARM_IRQ_GIC_MAX 127
+/* One single KVM irqchip, ie. the VGIC */
+#define KVM_NR_IRQCHIPS 1
+
/* PSCI interface */
#define KVM_PSCI_FN_BASE 0x95c1ba5e
#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 9147008..61bb5a6 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -191,7 +191,6 @@ int main(void)
DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
-#ifdef CONFIG_KVM_ARM_VGIC
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
@@ -201,14 +200,11 @@ int main(void)
DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
-#ifdef CONFIG_KVM_ARM_TIMER
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
-#endif
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
-#endif
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
#endif
BLANK();
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index 89545f6..318da33 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -10,8 +10,28 @@
*/
#include <linux/cpuidle.h>
-#include <asm/proc-fns.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm/cpuidle.h>
+extern struct of_cpuidle_method __cpuidle_method_of_table[];
+
+static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
+ __used __section(__cpuidle_method_of_table_end);
+
+static struct cpuidle_ops cpuidle_ops[NR_CPUS];
+
+/**
+ * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
+ * @dev: not used
+ * @drv: not used
+ * @index: not used
+ *
+ * A trivial wrapper to allow the cpu_do_idle function to be assigned as a
+ * cpuidle callback by matching the function signature.
+ *
+ * Returns the index passed as a parameter.
+ */
int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
@@ -19,3 +39,114 @@ int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
return index;
}
+
+/**
+ * arm_cpuidle_suspend() - function to enter low power idle states
+ * @index: an integer used as an identifier for the low level PM callbacks
+ *
+ * This function calls the underlying arch-specific low-level PM code as
+ * registered at init time.
+ *
+ * Returns -EOPNOTSUPP if no suspend callback is defined, the result of the
+ * callback otherwise.
+ */
+int arm_cpuidle_suspend(int index)
+{
+ int ret = -EOPNOTSUPP;
+ int cpu = smp_processor_id();
+
+ if (cpuidle_ops[cpu].suspend)
+ ret = cpuidle_ops[cpu].suspend(cpu, index);
+
+ return ret;
+}
+
+/**
+ * arm_cpuidle_get_ops() - find a registered cpuidle_ops by name
+ * @method: the method name
+ *
+ * Search the __cpuidle_method_of_table array for the cpuidle ops matching
+ * the method name.
+ *
+ * Returns a struct cpuidle_ops pointer, NULL if not found.
+ */
+static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
+{
+ struct of_cpuidle_method *m = __cpuidle_method_of_table;
+
+ for (; m->method; m++)
+ if (!strcmp(m->method, method))
+ return m->ops;
+
+ return NULL;
+}
+
+/**
+ * arm_cpuidle_read_ops() - Initialize the cpuidle ops with the device tree
+ * @dn: a pointer to a struct device node corresponding to a cpu node
+ * @cpu: the cpu identifier
+ *
+ * Get the method name defined in the 'enable-method' property, retrieve the
+ * associated cpuidle_ops and do a struct copy. This copy is needed because all
+ * cpuidle_ops are tagged __initdata and will be unloaded after the init
+ * process.
+ *
+ * Return 0 on success, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if
+ * no cpuidle_ops is registered for the 'enable-method'.
+ */
+static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
+{
+ const char *enable_method;
+ struct cpuidle_ops *ops;
+
+ enable_method = of_get_property(dn, "enable-method", NULL);
+ if (!enable_method)
+ return -ENOENT;
+
+ ops = arm_cpuidle_get_ops(enable_method);
+ if (!ops) {
+ pr_warn("%s: unsupported enable-method property: %s\n",
+ dn->full_name, enable_method);
+ return -EOPNOTSUPP;
+ }
+
+ cpuidle_ops[cpu] = *ops; /* structure copy */
+
+ pr_notice("cpuidle: enable-method property '%s': found operations\n",
+ enable_method);
+
+ return 0;
+}
+
+/**
+ * arm_cpuidle_init() - Initialize cpuidle_ops for a specific cpu
+ * @cpu: the cpu to be initialized
+ *
+ * Initialize the cpuidle ops with the device node for the cpu and then call
+ * the cpu's idle initialization callback. This may fail if the underlying HW
+ * is not operational.
+ *
+ * Returns:
+ * 0 on success,
+ * -ENODEV if it fails to find the cpu node in the device tree,
+ * -EOPNOTSUPP if it does not find a registered cpuidle_ops for this cpu,
+ * -ENOENT if it fails to find an 'enable-method' property,
+ * -ENXIO if the HW reports a failure or a misconfiguration,
+ * -ENOMEM if the HW reports a memory allocation failure
+ */
+int __init arm_cpuidle_init(int cpu)
+{
+ struct device_node *cpu_node = of_cpu_device_node_get(cpu);
+ int ret;
+
+ if (!cpu_node)
+ return -ENODEV;
+
+ ret = arm_cpuidle_read_ops(cpu_node, cpu);
+ if (!ret && cpuidle_ops[cpu].init)
+ ret = cpuidle_ops[cpu].init(cpu_node, cpu);
+
+ of_node_put(cpu_node);
+
+ return ret;
+}
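
A cpuidle driver's enter callback then becomes a thin shim over the dispatcher above; a minimal sketch (driver naming purely illustrative):

#include <linux/cpuidle.h>
#include <asm/cpuidle.h>

static int foo_enter_lowpower(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	/* dispatch to the per-cpu ops resolved by arm_cpuidle_init() */
	if (arm_cpuidle_suspend(index))
		return -1;

	return index;
}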
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7fc70ae..dc7d0a9 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -648,7 +648,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* Per-cpu breakpoints are not supported by our stepping
* mechanism.
*/
- if (!bp->hw.bp_target)
+ if (!bp->hw.target)
return -EINVAL;
/*
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 0cc7e58..a66e37e 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -76,7 +76,7 @@ void timer_tick(void)
}
#endif
-static void dummy_clock_access(struct timespec *ts)
+static void dummy_clock_access(struct timespec64 *ts)
{
ts->tv_sec = 0;
ts->tv_nsec = 0;
@@ -85,12 +85,12 @@ static void dummy_clock_access(struct timespec *ts)
static clock_access_fn __read_persistent_clock = dummy_clock_access;
static clock_access_fn __read_boot_clock = dummy_clock_access;
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
{
__read_persistent_clock(ts);
}
-void read_boot_clock(struct timespec *ts)
+void read_boot_clock64(struct timespec64 *ts)
{
__read_boot_clock(ts);
}
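
Platform code registering a persistent clock now supplies the timespec64 variant directly; a sketch with a hypothetical RTC helper:

#include <linux/init.h>
#include <linux/time64.h>
#include <asm/mach/time.h>

extern time64_t foo_rtc_read_seconds(void);	/* hypothetical RTC helper */

static void foo_read_persistent(struct timespec64 *ts)
{
	ts->tv_sec = foo_rtc_read_seconds();
	ts->tv_nsec = 0;
}

static int __init foo_persistent_clock_init(void)
{
	return register_persistent_clock(NULL, foo_read_persistent);
}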
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 0d31d3c..efe17dd 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -270,7 +270,7 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
return false;
- if (strcmp(tk->tkr.clock->name, "arch_sys_counter") != 0)
+ if (strcmp(tk->tkr_mono.clock->name, "arch_sys_counter") != 0)
return false;
return true;
@@ -316,12 +316,12 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
if (vdso_data->tk_is_cntvct) {
- vdso_data->cs_cycle_last = tk->tkr.cycle_last;
+ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_snsec = tk->tkr.xtime_nsec;
- vdso_data->cs_mult = tk->tkr.mult;
- vdso_data->cs_shift = tk->tkr.shift;
- vdso_data->cs_mask = tk->tkr.mask;
+ vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec;
+ vdso_data->cs_mult = tk->tkr_mono.mult;
+ vdso_data->cs_shift = tk->tkr_mono.shift;
+ vdso_data->cs_mask = tk->tkr_mono.mask;
}
vdso_write_end(vdso_data);
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 338ace7..f1f79d1 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
+ depends on MMU && OF
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -26,10 +27,12 @@ config KVM
select KVM_ARM_HOST
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select SRCU
- depends on ARM_VIRT_EXT && ARM_LPAE
+ select MMU_NOTIFIER
+ select HAVE_KVM_EVENTFD
+ select HAVE_KVM_IRQFD
+ depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
- Support hosting virtualized guest machines. You will also
- need to select one or more of the processor modules below.
+ Support hosting virtualized guest machines.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
@@ -37,10 +40,7 @@ config KVM
If unsure, say N.
config KVM_ARM_HOST
- bool "KVM host support for ARM cpus."
- depends on KVM
- depends on MMU
- select MMU_NOTIFIER
+ bool
---help---
Provides host support for ARM processors.
@@ -55,20 +55,4 @@ config KVM_ARM_MAX_VCPUS
large, so only choose a reasonable number that you expect to
actually use.
-config KVM_ARM_VGIC
- bool "KVM support for Virtual GIC"
- depends on KVM_ARM_HOST && OF
- select HAVE_KVM_IRQCHIP
- default y
- ---help---
- Adds support for a hardware assisted, in-kernel GIC emulation.
-
-config KVM_ARM_TIMER
- bool "KVM support for Architected Timers"
- depends on KVM_ARM_VGIC && ARM_ARCH_TIMER
- select HAVE_KVM_IRQCHIP
- default y
- ---help---
- Adds support for the Architected Timers in virtual machines
-
endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 443b8be..139e46c 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -7,7 +7,7 @@ ifeq ($(plus_virt),+virt)
plus_virt_def := -DREQUIRES_VIRT=1
endif
-ccflags-y += -Ivirt/kvm -Iarch/arm/kvm
+ccflags-y += -Iarch/arm/kvm
CFLAGS_arm.o := -I. $(plus_virt_def)
CFLAGS_mmu.o := -I.
@@ -15,12 +15,12 @@ AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
KVM := ../../../virt/kvm
-kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
+kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
-obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
+obj-y += $(KVM)/arm/vgic.o
+obj-y += $(KVM)/arm/vgic-v2.o
+obj-y += $(KVM)/arm/vgic-v2-emul.o
+obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5560f74..6f53645 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -61,8 +61,6 @@ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);
-static bool vgic_present;
-
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
BUG_ON(preemptible());
@@ -173,8 +171,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
- r = vgic_present;
- break;
+ case KVM_CAP_IRQFD:
+ case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY:
case KVM_CAP_SYNC_MMU:
@@ -183,6 +181,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_PSCI:
case KVM_CAP_ARM_PSCI_0_2:
case KVM_CAP_READONLY_MEM:
+ case KVM_CAP_MP_STATE:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -268,7 +267,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return 0;
+ return kvm_timer_should_fire(vcpu);
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -313,13 +312,29 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- return -EINVAL;
+ if (vcpu->arch.pause)
+ mp_state->mp_state = KVM_MP_STATE_STOPPED;
+ else
+ mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+
+ return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- return -EINVAL;
+ switch (mp_state->mp_state) {
+ case KVM_MP_STATE_RUNNABLE:
+ vcpu->arch.pause = false;
+ break;
+ case KVM_MP_STATE_STOPPED:
+ vcpu->arch.pause = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
/**
@@ -452,6 +467,11 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
return 0;
}
+bool kvm_arch_intc_initialized(struct kvm *kvm)
+{
+ return vgic_initialized(kvm);
+}
+
static void vcpu_pause(struct kvm_vcpu *vcpu)
{
wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
@@ -831,8 +851,6 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
switch (dev_id) {
case KVM_ARM_DEVICE_VGIC_V2:
- if (!vgic_present)
- return -ENXIO;
return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
default:
return -ENODEV;
@@ -847,10 +865,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
- if (vgic_present)
- return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
- else
- return -ENXIO;
+ return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
}
case KVM_ARM_SET_DEVICE_ADDR: {
struct kvm_arm_device_addr dev_addr;
@@ -1035,10 +1050,6 @@ static int init_hyp_mode(void)
if (err)
goto out_free_context;
-#ifdef CONFIG_KVM_ARM_VGIC
- vgic_present = true;
-#endif
-
/*
* Init HYP architected timer support
*/
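
From userspace the new MP_STATE support is a pair of ordinary vcpu ioctls; a hedged sketch (error handling elided, vcpu_fd is an open KVM vcpu file descriptor):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vcpu_set_stopped(int vcpu_fd, int stopped)
{
	struct kvm_mp_state st = {
		.mp_state = stopped ? KVM_MP_STATE_STOPPED
				    : KVM_MP_STATE_RUNNABLE,
	};

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}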
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 384bab6..d503fbb 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -109,22 +109,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL;
}
-#ifndef CONFIG_KVM_ARM_TIMER
-
-#define NUM_TIMER_REGS 0
-
-static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
- return 0;
-}
-
-static bool is_timer_reg(u64 index)
-{
- return false;
-}
-
-#else
-
#define NUM_TIMER_REGS 3
static bool is_timer_reg(u64 index)
@@ -152,8 +136,6 @@ static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
return 0;
}
-#endif
-
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 14d4883..35e4a3a 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -402,7 +402,6 @@ vcpu .req r0 @ vcpu pointer always in r0
* Assumes vcpu pointer in vcpu reg
*/
.macro save_vgic_state
-#ifdef CONFIG_KVM_ARM_VGIC
/* Get VGIC VCTRL base into r2 */
ldr r2, [vcpu, #VCPU_KVM]
ldr r2, [r2, #KVM_VGIC_VCTRL]
@@ -460,7 +459,6 @@ ARM_BE8(rev r6, r6 )
subs r4, r4, #1
bne 1b
2:
-#endif
.endm
/*
@@ -469,7 +467,6 @@ ARM_BE8(rev r6, r6 )
* Assumes vcpu pointer in vcpu reg
*/
.macro restore_vgic_state
-#ifdef CONFIG_KVM_ARM_VGIC
/* Get VGIC VCTRL base into r2 */
ldr r2, [vcpu, #VCPU_KVM]
ldr r2, [r2, #KVM_VGIC_VCTRL]
@@ -501,7 +498,6 @@ ARM_BE8(rev r6, r6 )
subs r4, r4, #1
bne 1b
2:
-#endif
.endm
#define CNTHCTL_PL1PCTEN (1 << 0)
@@ -515,7 +511,6 @@ ARM_BE8(rev r6, r6 )
* Clobbers r2-r5
*/
.macro save_timer_state
-#ifdef CONFIG_KVM_ARM_TIMER
ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0
@@ -537,7 +532,6 @@ ARM_BE8(rev r6, r6 )
mcrr p15, 4, r2, r2, c14 @ CNTVOFF
1:
-#endif
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
@@ -559,7 +553,6 @@ ARM_BE8(rev r6, r6 )
bic r2, r2, #CNTHCTL_PL1PCEN
mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
-#ifdef CONFIG_KVM_ARM_TIMER
ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0
@@ -579,7 +572,6 @@ ARM_BE8(rev r6, r6 )
and r2, r2, #3
mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
1:
-#endif
.endm
.equ vmentry, 0
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 5d3bfc0..974b1c6 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -121,12 +121,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 0;
}
-static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_exit_mmio *mmio)
+static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
{
unsigned long rt;
- int len;
- bool is_write, sign_extend;
+ int access_size;
+ bool sign_extend;
if (kvm_vcpu_dabt_isextabt(vcpu)) {
/* cache operation on I/O addr, tell guest unsupported */
@@ -140,17 +139,15 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return 1;
}
- len = kvm_vcpu_dabt_get_as(vcpu);
- if (unlikely(len < 0))
- return len;
+ access_size = kvm_vcpu_dabt_get_as(vcpu);
+ if (unlikely(access_size < 0))
+ return access_size;
- is_write = kvm_vcpu_dabt_iswrite(vcpu);
+ *is_write = kvm_vcpu_dabt_iswrite(vcpu);
sign_extend = kvm_vcpu_dabt_issext(vcpu);
rt = kvm_vcpu_dabt_get_rd(vcpu);
- mmio->is_write = is_write;
- mmio->phys_addr = fault_ipa;
- mmio->len = len;
+ *len = access_size;
vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt;
@@ -165,20 +162,20 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa)
{
- struct kvm_exit_mmio mmio;
unsigned long data;
unsigned long rt;
int ret;
+ bool is_write;
+ int len;
+ u8 data_buf[8];
/*
- * Prepare MMIO operation. First stash it in a private
- * structure that we can use for in-kernel emulation. If the
- * kernel can't handle it, copy it into run->mmio and let user
- * space do its magic.
+ * Prepare MMIO operation. First decode the syndrome data we get
+ * from the CPU. Then check whether some in-kernel emulation feels
+ * responsible, otherwise let user space do its magic.
*/
-
if (kvm_vcpu_dabt_isvalid(vcpu)) {
- ret = decode_hsr(vcpu, fault_ipa, &mmio);
+ ret = decode_hsr(vcpu, &is_write, &len);
if (ret)
return ret;
} else {
@@ -188,21 +185,34 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
rt = vcpu->arch.mmio_decode.rt;
- if (mmio.is_write) {
- data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
- mmio.len);
+ if (is_write) {
+ data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+ mmio_write_buf(data_buf, len, data);
- trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
- fault_ipa, data);
- mmio_write_buf(mmio.data, mmio.len, data);
+ ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+ data_buf);
} else {
- trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
fault_ipa, 0);
+
+ ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+ data_buf);
}
- if (vgic_handle_mmio(vcpu, run, &mmio))
+ /* Now prepare kvm_run for the potential return to userland. */
+ run->mmio.is_write = is_write;
+ run->mmio.phys_addr = fault_ipa;
+ run->mmio.len = len;
+ memcpy(run->mmio.data, data_buf, len);
+
+ if (!ret) {
+ /* We handled the access successfully in the kernel. */
+ kvm_handle_mmio_return(vcpu, run);
return 1;
+ }
- kvm_prepare_mmio(run, &mmio);
+ run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
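
When kvm_io_bus_read()/kvm_io_bus_write() decline the access, the exit reaches userspace with run->mmio already populated as above; a VMM dispatches it roughly like this (emulate_mmio() is a hypothetical device-model hook):

#include <linux/kvm.h>

/* hypothetical device-model entry point */
void emulate_mmio(__u64 addr, __u8 *data, __u32 len, __u8 is_write);

static void handle_vcpu_exit(struct kvm_run *run)
{
	if (run->exit_reason == KVM_EXIT_MMIO)
		emulate_mmio(run->mmio.phys_addr, run->mmio.data,
			     run->mmio.len, run->mmio.is_write);
}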
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3e6859b..15b050d 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -290,7 +290,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
phys_addr_t addr = start, end = start + size;
phys_addr_t next;
- pgd = pgdp + pgd_index(addr);
+ pgd = pgdp + kvm_pgd_index(addr);
do {
next = kvm_pgd_addr_end(addr, end);
if (!pgd_none(*pgd))
@@ -355,7 +355,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
phys_addr_t next;
pgd_t *pgd;
- pgd = kvm->arch.pgd + pgd_index(addr);
+ pgd = kvm->arch.pgd + kvm_pgd_index(addr);
do {
next = kvm_pgd_addr_end(addr, end);
stage2_flush_puds(kvm, pgd, addr, next);
@@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
__phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}
+/* Free the HW pgd, one page at a time */
+static void kvm_free_hwpgd(void *hwpgd)
+{
+ free_pages_exact(hwpgd, kvm_get_hwpgd_size());
+}
+
+/* Allocate the HW PGD, making sure that each page gets its own refcount */
+static void *kvm_alloc_hwpgd(void)
+{
+ unsigned int size = kvm_get_hwpgd_size();
+
+ return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+}
+
/**
* kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
* @kvm: The KVM struct pointer for the VM.
@@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
*/
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
- int ret;
pgd_t *pgd;
+ void *hwpgd;
if (kvm->arch.pgd != NULL) {
kvm_err("kvm_arch already initialized?\n");
return -EINVAL;
}
+ hwpgd = kvm_alloc_hwpgd();
+ if (!hwpgd)
+ return -ENOMEM;
+
+ /* When the kernel uses more levels of page tables than the
+ * guest, we allocate a fake PGD and pre-populate it to point
+ * to the next-level page table, which will be the real
+ * initial page table pointed to by the VTTBR.
+ *
+ * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
+ * the PMD and the kernel will use folded pud.
+ * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
+ * pages.
+ */
if (KVM_PREALLOC_LEVEL > 0) {
+ int i;
+
/*
* Allocate fake pgd for the page table manipulation macros to
* work. This is not used by the hardware and we have no
@@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
*/
pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
GFP_KERNEL | __GFP_ZERO);
+
+ if (!pgd) {
+ kvm_free_hwpgd(hwpgd);
+ return -ENOMEM;
+ }
+
+ /* Plug the HW PGD into the fake one. */
+ for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+ if (KVM_PREALLOC_LEVEL == 1)
+ pgd_populate(NULL, pgd + i,
+ (pud_t *)hwpgd + i * PTRS_PER_PUD);
+ else if (KVM_PREALLOC_LEVEL == 2)
+ pud_populate(NULL, pud_offset(pgd, 0) + i,
+ (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+ }
} else {
/*
* Allocate actual first-level Stage-2 page table used by the
* hardware for Stage-2 page table walks.
*/
- pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
+ pgd = (pgd_t *)hwpgd;
}
- if (!pgd)
- return -ENOMEM;
-
- ret = kvm_prealloc_hwpgd(kvm, pgd);
- if (ret)
- goto out_err;
-
kvm_clean_pgd(pgd);
kvm->arch.pgd = pgd;
return 0;
-out_err:
- if (KVM_PREALLOC_LEVEL > 0)
- kfree(pgd);
- else
- free_pages((unsigned long)pgd, S2_PGD_ORDER);
- return ret;
}
/**
@@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
return;
unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
- kvm_free_hwpgd(kvm);
+ kvm_free_hwpgd(kvm_get_hwpgd(kvm));
if (KVM_PREALLOC_LEVEL > 0)
kfree(kvm->arch.pgd);
- else
- free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+
kvm->arch.pgd = NULL;
}
@@ -799,7 +830,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
pgd_t *pgd;
pud_t *pud;
- pgd = kvm->arch.pgd + pgd_index(addr);
+ pgd = kvm->arch.pgd + kvm_pgd_index(addr);
if (WARN_ON(pgd_none(*pgd))) {
if (!cache)
return NULL;
@@ -1089,7 +1120,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
pgd_t *pgd;
phys_addr_t next;
- pgd = kvm->arch.pgd + pgd_index(addr);
+ pgd = kvm->arch.pgd + kvm_pgd_index(addr);
do {
/*
* Release kvm_mmu_lock periodically if the memory region is
@@ -1299,10 +1330,51 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
out_unlock:
spin_unlock(&kvm->mmu_lock);
+ kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
return ret;
}
+/*
+ * Resolve the access fault by making the page young again.
+ * Note that because the faulting entry is guaranteed not to be
+ * cached in the TLB, we don't need to invalidate anything.
+ */
+static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
+{
+ pmd_t *pmd;
+ pte_t *pte;
+ pfn_t pfn;
+ bool pfn_valid = false;
+
+ trace_kvm_access_fault(fault_ipa);
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+
+ pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
+ if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ goto out;
+
+ if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */
+ *pmd = pmd_mkyoung(*pmd);
+ pfn = pmd_pfn(*pmd);
+ pfn_valid = true;
+ goto out;
+ }
+
+ pte = pte_offset_kernel(pmd, fault_ipa);
+ if (pte_none(*pte)) /* Nothing there either */
+ goto out;
+
+ *pte = pte_mkyoung(*pte); /* Just a page... */
+ pfn = pte_pfn(*pte);
+ pfn_valid = true;
+out:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ if (pfn_valid)
+ kvm_set_pfn_accessed(pfn);
+}
+
/**
* kvm_handle_guest_abort - handles all 2nd stage aborts
* @vcpu: the VCPU pointer
@@ -1333,7 +1405,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Check the stage-2 fault is trans. fault or write fault */
fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
- if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
+ if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
+ fault_status != FSC_ACCESS) {
kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
kvm_vcpu_trap_get_class(vcpu),
(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1369,6 +1442,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Userspace should not be able to register out-of-bounds IPAs */
VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
+ if (fault_status == FSC_ACCESS) {
+ handle_access_fault(vcpu, fault_ipa);
+ ret = 1;
+ goto out_unlock;
+ }
+
ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
if (ret == 0)
ret = 1;
@@ -1377,15 +1456,16 @@ out_unlock:
return ret;
}
-static void handle_hva_to_gpa(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- void (*handler)(struct kvm *kvm,
- gpa_t gpa, void *data),
- void *data)
+static int handle_hva_to_gpa(struct kvm *kvm,
+ unsigned long start,
+ unsigned long end,
+ int (*handler)(struct kvm *kvm,
+ gpa_t gpa, void *data),
+ void *data)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
+ int ret = 0;
slots = kvm_memslots(kvm);
@@ -1409,14 +1489,17 @@ static void handle_hva_to_gpa(struct kvm *kvm,
for (; gfn < gfn_end; ++gfn) {
gpa_t gpa = gfn << PAGE_SHIFT;
- handler(kvm, gpa, data);
+ ret |= handler(kvm, gpa, data);
}
}
+
+ return ret;
}
-static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+ return 0;
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -1442,7 +1525,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
return 0;
}
-static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
pte_t *pte = (pte_t *)data;
@@ -1454,6 +1537,7 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
* through this calling path.
*/
stage2_set_pte(kvm, NULL, gpa, pte, 0);
+ return 0;
}
@@ -1470,6 +1554,67 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pmd = stage2_get_pmd(kvm, NULL, gpa);
+ if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ return 0;
+
+ if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */
+ if (pmd_young(*pmd)) {
+ *pmd = pmd_mkold(*pmd);
+ return 1;
+ }
+
+ return 0;
+ }
+
+ pte = pte_offset_kernel(pmd, gpa);
+ if (pte_none(*pte))
+ return 0;
+
+ if (pte_young(*pte)) {
+ *pte = pte_mkold(*pte); /* Just a page... */
+ return 1;
+ }
+
+ return 0;
+}
+
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pmd = stage2_get_pmd(kvm, NULL, gpa);
+ if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ return 0;
+
+ if (kvm_pmd_huge(*pmd)) /* THP, HugeTLB */
+ return pmd_young(*pmd);
+
+ pte = pte_offset_kernel(pmd, gpa);
+ if (!pte_none(*pte)) /* Just a page... */
+ return pte_young(*pte);
+
+ return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ trace_kvm_age_hva(start, end);
+ return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ trace_kvm_test_age_hva(hva);
+ return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+}
+
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index 6817664..0ec3539 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -68,6 +68,21 @@ TRACE_EVENT(kvm_guest_fault,
__entry->hxfar, __entry->vcpu_pc)
);
+TRACE_EVENT(kvm_access_fault,
+ TP_PROTO(unsigned long ipa),
+ TP_ARGS(ipa),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, ipa )
+ ),
+
+ TP_fast_assign(
+ __entry->ipa = ipa;
+ ),
+
+ TP_printk("IPA: %lx", __entry->ipa)
+);
+
TRACE_EVENT(kvm_irq_line,
TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
TP_ARGS(type, vcpu_idx, irq_num, level),
@@ -210,6 +225,39 @@ TRACE_EVENT(kvm_set_spte_hva,
TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
);
+TRACE_EVENT(kvm_age_hva,
+ TP_PROTO(unsigned long start, unsigned long end),
+ TP_ARGS(start, end),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, start )
+ __field( unsigned long, end )
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
+ __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_test_age_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
+);
+
TRACE_EVENT(kvm_hvc,
TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
TP_ARGS(vcpu_pc, r0, imm),
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index e365c1b..306ebc5 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -17,7 +17,6 @@
#include <linux/cpuidle.h>
#include <linux/io.h>
#include <linux/export.h>
-#include <asm/proc-fns.h>
#include <asm/cpuidle.h>
#include <mach/cpuidle.h>
diff --git a/arch/arm/mach-dove/pcie.c b/arch/arm/mach-dove/pcie.c
index 8a275f2..91fe971 100644
--- a/arch/arm/mach-dove/pcie.c
+++ b/arch/arm/mach-dove/pcie.c
@@ -155,17 +155,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);
static struct pci_bus __init *
dove_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
- struct pci_bus *bus;
-
- if (nr < num_pcie_ports) {
- bus = pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
- &sys->resources);
- } else {
- bus = NULL;
+ if (nr >= num_pcie_ports) {
BUG();
+ return NULL;
}
- return bus;
+ return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
+ &sys->resources);
}
static int __init dove_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 9e9dfdf..f44c2e0 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -166,16 +166,14 @@ static void __init exynos_init_io(void)
exynos_map_io();
}
+/*
+ * Apparently, these SoCs are not able to wake up from suspend using
+ * the PMU. Too bad. Should they suddenly become capable of such a
+ * feat, the matches below should be moved to suspend.c.
+ */
static const struct of_device_id exynos_dt_pmu_match[] = {
- { .compatible = "samsung,exynos3250-pmu" },
- { .compatible = "samsung,exynos4210-pmu" },
- { .compatible = "samsung,exynos4212-pmu" },
- { .compatible = "samsung,exynos4412-pmu" },
- { .compatible = "samsung,exynos4415-pmu" },
- { .compatible = "samsung,exynos5250-pmu" },
{ .compatible = "samsung,exynos5260-pmu" },
{ .compatible = "samsung,exynos5410-pmu" },
- { .compatible = "samsung,exynos5420-pmu" },
{ /*sentinel*/ },
};
@@ -186,9 +184,6 @@ static void exynos_map_pmu(void)
np = of_find_matching_node(NULL, exynos_dt_pmu_match);
if (np)
pmu_base_addr = of_iomap(np, 0);
-
- if (!pmu_base_addr)
- panic("failed to find exynos pmu register\n");
}
static void __init exynos_init_irq(void)
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 318d127..2146d91 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -18,7 +18,9 @@
#include <linux/syscore_ops.h>
#include <linux/cpu_pm.h>
#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
#include <linux/err.h>
#include <linux/regulator/machine.h>
@@ -43,8 +45,8 @@
#define EXYNOS5420_CPU_STATE 0x28
/**
- * struct exynos_wkup_irq - Exynos GIC to PMU IRQ mapping
- * @hwirq: Hardware IRQ signal of the GIC
+ * struct exynos_wkup_irq - PMU IRQ to mask mapping
+ * @hwirq: Hardware IRQ signal of the PMU
* @mask: Mask in PMU wake-up mask register
*/
struct exynos_wkup_irq {
@@ -93,14 +95,14 @@ static const struct exynos_wkup_irq exynos3250_wkup_irq[] = {
};
static const struct exynos_wkup_irq exynos4_wkup_irq[] = {
- { 76, BIT(1) }, /* RTC alarm */
- { 77, BIT(2) }, /* RTC tick */
+ { 44, BIT(1) }, /* RTC alarm */
+ { 45, BIT(2) }, /* RTC tick */
{ /* sentinel */ },
};
static const struct exynos_wkup_irq exynos5250_wkup_irq[] = {
- { 75, BIT(1) }, /* RTC alarm */
- { 76, BIT(2) }, /* RTC tick */
+ { 43, BIT(1) }, /* RTC alarm */
+ { 44, BIT(2) }, /* RTC tick */
{ /* sentinel */ },
};
@@ -167,6 +169,113 @@ static int exynos_irq_set_wake(struct irq_data *data, unsigned int state)
return -ENOENT;
}
+static struct irq_chip exynos_pmu_chip = {
+ .name = "PMU",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_wake = exynos_irq_set_wake,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int exynos_pmu_domain_xlate(struct irq_domain *domain,
+ struct device_node *controller,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (domain->of_node != controller)
+ return -EINVAL; /* Shouldn't happen, really... */
+ if (intsize != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (intspec[0] != 0)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ *out_hwirq = intspec[1];
+ *out_type = intspec[2];
+ return 0;
+}
+
+static int exynos_pmu_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct of_phandle_args *args = data;
+ struct of_phandle_args parent_args;
+ irq_hw_number_t hwirq;
+ int i;
+
+ if (args->args_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (args->args[0] != 0)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ hwirq = args->args[1];
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &exynos_pmu_chip, NULL);
+
+ parent_args = *args;
+ parent_args.np = domain->parent->of_node;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args);
+}
+
+static struct irq_domain_ops exynos_pmu_domain_ops = {
+ .xlate = exynos_pmu_domain_xlate,
+ .alloc = exynos_pmu_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init exynos_pmu_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+
+ if (!parent) {
+ pr_err("%s: no parent, giving up\n", node->full_name);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%s: unable to obtain parent domain\n", node->full_name);
+ return -ENXIO;
+ }
+
+ pmu_base_addr = of_iomap(node, 0);
+
+ if (!pmu_base_addr) {
+ pr_err("%s: failed to find exynos pmu register\n",
+ node->full_name);
+ return -ENOMEM;
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, 0,
+ node, &exynos_pmu_domain_ops,
+ NULL);
+ if (!domain) {
+ iounmap(pmu_base_addr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+#define EXYNOS_PMU_IRQ(symbol, name) OF_DECLARE_2(irqchip, symbol, name, exynos_pmu_irq_init)
+
+EXYNOS_PMU_IRQ(exynos3250_pmu_irq, "samsung,exynos3250-pmu");
+EXYNOS_PMU_IRQ(exynos4210_pmu_irq, "samsung,exynos4210-pmu");
+EXYNOS_PMU_IRQ(exynos4212_pmu_irq, "samsung,exynos4212-pmu");
+EXYNOS_PMU_IRQ(exynos4412_pmu_irq, "samsung,exynos4412-pmu");
+EXYNOS_PMU_IRQ(exynos4415_pmu_irq, "samsung,exynos4415-pmu");
+EXYNOS_PMU_IRQ(exynos5250_pmu_irq, "samsung,exynos5250-pmu");
+EXYNOS_PMU_IRQ(exynos5420_pmu_irq, "samsung,exynos5420-pmu");
+
static int exynos_cpu_do_idle(void)
{
/* issue the standby signal into the pm unit. */
@@ -615,17 +724,19 @@ static struct syscore_ops exynos_pm_syscore_ops;
void __init exynos_pm_init(void)
{
const struct of_device_id *match;
+ struct device_node *np;
u32 tmp;
- of_find_matching_node_and_match(NULL, exynos_pmu_of_device_ids, &match);
- if (!match) {
+ np = of_find_matching_node_and_match(NULL, exynos_pmu_of_device_ids, &match);
+ if (!np) {
pr_err("Failed to find PMU node\n");
return;
}
- pm_data = (struct exynos_pm_data *) match->data;
- /* Platform-specific GIC callback */
- gic_arch_extn.irq_set_wake = exynos_irq_set_wake;
+ if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL)))
+ pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+
+ pm_data = (struct exynos_pm_data *) match->data;
/* All wakeup disable */
tmp = pmu_raw_readl(S5P_WAKEUP_MASK);
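
With the PMU stacked on top of the GIC as a hierarchical domain, drivers need nothing Exynos-specific to arm a wake-up source; the generic call walks the stack down into exynos_irq_set_wake() (sketch, names hypothetical):

#include <linux/interrupt.h>

/* in a driver's suspend path; irq was mapped through the PMU domain */
static int foo_arm_wakeup(unsigned int irq)
{
	return enable_irq_wake(irq);	/* ends up in exynos_irq_set_wake() */
}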
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index e8627e0..c8dffce 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -631,6 +631,7 @@ config SOC_IMX6SX
config SOC_VF610
bool "Vybrid Family VF610 support"
+ select IRQ_DOMAIN_HIERARCHY
select ARM_GIC
select PINCTRL_VF610
select PL310_ERRATA_769419 if CACHE_L2X0
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index d76d086..8e21ccc 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -9,7 +9,6 @@
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include "common.h"
#include "cpuidle.h"
diff --git a/arch/arm/mach-imx/cpuidle-imx6sl.c b/arch/arm/mach-imx/cpuidle-imx6sl.c
index 7d92e65..5742a9f 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sl.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sl.c
@@ -9,7 +9,6 @@
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include "common.h"
#include "cpuidle.h"
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 5a36722..2c9f1a8 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -10,7 +10,6 @@
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include "common.h"
diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c
index 445e553..097ea4c 100644
--- a/arch/arm/mach-mv78xx0/pcie.c
+++ b/arch/arm/mach-mv78xx0/pcie.c
@@ -197,17 +197,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);
static struct pci_bus __init *
mv78xx0_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
- struct pci_bus *bus;
-
- if (nr < num_pcie_ports) {
- bus = pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
- &sys->resources);
- } else {
- bus = NULL;
+ if (nr >= num_pcie_ports) {
BUG();
+ return NULL;
}
- return bus;
+ return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
+ &sys->resources);
}
static int __init mv78xx0_pcie_map_irq(const struct pci_dev *dev, u8 slot,
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 01e398a..4b8e9f4 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -14,10 +14,9 @@
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include "common.h"
#include "pm.h"
@@ -84,7 +83,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
{
struct idle_statedata *cx = state_ptr + index;
u32 mpuss_can_lose_context = 0;
- int cpu_id = smp_processor_id();
/*
* CPU0 has to wait and stay ON until CPU1 is in OFF state.
@@ -112,7 +110,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
(cx->mpu_logic_state == PWRDM_POWER_OFF);
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+ tick_broadcast_enter();
/*
* Call idle CPU PM enter notifier chain so that
@@ -169,7 +167,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
if (dev->cpu == 0 && mpuss_can_lose_context)
cpu_cluster_pm_exit();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+ tick_broadcast_exit();
fail:
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
@@ -184,8 +182,7 @@ fail:
*/
static void omap_setup_broadcast_timer(void *arg)
{
- int cpu = smp_processor_id();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+ tick_broadcast_enable();
}
static struct cpuidle_driver omap4_idle_driver = {
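
Throughout this series the clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_*)
calls are replaced by the tick_broadcast_enter()/tick_broadcast_exit() pair,
which act on the calling CPU and need no cpu id argument. The resulting
idle-entry shape, as a hedged sketch (the "example_" name is invented):

        #include <linux/tick.h>

        static int example_enter_deep_idle(int index)
        {
                /* the CPU-local timer may stop in this state: arm the broadcast */
                tick_broadcast_enter();

                /* ... program and enter the low power state here ... */

                /* local timer ticks again: leave broadcast mode */
                tick_broadcast_exit();
                return index;
        }
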
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index dc6e79c..9a8611a 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -150,9 +150,13 @@ static int nop_mmc_set_power(struct device *dev, int power_on, int vdd)
static inline void omap_hsmmc_mux(struct omap_hsmmc_platform_data
*mmc_controller, int controller_nr)
{
- if (gpio_is_valid(mmc_controller->switch_pin) &&
- (mmc_controller->switch_pin < OMAP_MAX_GPIO_LINES))
- omap_mux_init_gpio(mmc_controller->switch_pin,
+ if (gpio_is_valid(mmc_controller->gpio_cd) &&
+ (mmc_controller->gpio_cd < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->gpio_cd,
+ OMAP_PIN_INPUT_PULLUP);
+ if (gpio_is_valid(mmc_controller->gpio_cod) &&
+ (mmc_controller->gpio_cod < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->gpio_cod,
OMAP_PIN_INPUT_PULLUP);
if (gpio_is_valid(mmc_controller->gpio_wp) &&
(mmc_controller->gpio_wp < OMAP_MAX_GPIO_LINES))
@@ -250,15 +254,20 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
mmc->internal_clock = !c->ext_clock;
mmc->reg_offset = 0;
- mmc->switch_pin = c->gpio_cd;
+ if (c->cover_only) {
+ /* detect if mobile phone cover removed */
+ mmc->gpio_cd = -EINVAL;
+ mmc->gpio_cod = c->gpio_cd;
+ } else {
+ /* card detect pin on the mmc socket itself */
+ mmc->gpio_cd = c->gpio_cd;
+ mmc->gpio_cod = -EINVAL;
+ }
mmc->gpio_wp = c->gpio_wp;
mmc->remux = c->remux;
mmc->init_card = c->init_card;
- if (c->cover_only)
- mmc->cover = 1;
-
if (c->nonremovable)
mmc->nonremovable = 1;
@@ -358,7 +367,15 @@ void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
if (!mmc_pdata)
continue;
- mmc_pdata->switch_pin = c->gpio_cd;
+ if (c->cover_only) {
+ /* detect if mobile phone cover removed */
+ mmc_pdata->gpio_cd = -EINVAL;
+ mmc_pdata->gpio_cod = c->gpio_cd;
+ } else {
+ /* card detect pin on the mmc socket itself */
+ mmc_pdata->gpio_cd = c->gpio_cd;
+ mmc_pdata->gpio_cod = -EINVAL;
+ }
mmc_pdata->gpio_wp = c->gpio_wp;
res = omap_device_register(pdev);
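
The same cover/card-detect split is spelled out twice above, in
omap_hsmmc_pdata_init() and omap_hsmmc_late_init(); the rule is that one board
GPIO reports either a slider-phone cover or real card presence, never both. A
hypothetical helper capturing that rule (not part of the patch), assuming the
omap_hsmmc_platform_data fields shown in the diff:

        static void example_set_detect_gpios(struct omap_hsmmc_platform_data *mmc,
                                             int gpio_cd, bool cover_only)
        {
                if (cover_only) {
                        /* the GPIO reports cover open/close, not card presence */
                        mmc->gpio_cd = -EINVAL;
                        mmc->gpio_cod = gpio_cd;
                } else {
                        /* the GPIO is a card-detect line on the socket itself */
                        mmc->gpio_cd = gpio_cd;
                        mmc->gpio_cod = -EINVAL;
                }
        }
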
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2a2f4d5..25f1bee 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
return kasprintf(GFP_KERNEL, "OMAP4");
else if (soc_is_omap54xx())
return kasprintf(GFP_KERNEL, "OMAP5");
+ else if (soc_is_am33xx() || soc_is_am335x())
+ return kasprintf(GFP_KERNEL, "AM33xx");
else if (soc_is_am43xx())
return kasprintf(GFP_KERNEL, "AM43xx");
else if (soc_is_dra7xx())
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index f961c46..3b56722 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -20,11 +20,12 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>
-#include <linux/irqchip/arm-gic.h>
#include "omap-wakeupgen.h"
#include "omap-secure.h"
@@ -78,29 +79,12 @@ static inline void sar_writel(u32 val, u32 offset, u8 idx)
static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
- unsigned int spi_irq;
-
- /*
- * PPIs and SGIs are not supported.
- */
- if (irq < OMAP44XX_IRQ_GIC_START)
- return -EINVAL;
-
- /*
- * Subtract the GIC offset.
- */
- spi_irq = irq - OMAP44XX_IRQ_GIC_START;
- if (spi_irq > MAX_IRQS) {
- pr_err("omap wakeupGen: Invalid IRQ%d\n", irq);
- return -EINVAL;
- }
-
/*
* Each WakeupGen register controls 32 interrupts,
* i.e. one bit per SPI IRQ
*/
- *reg_index = spi_irq >> 5;
- *bit_posn = spi_irq %= 32;
+ *reg_index = irq >> 5;
+ *bit_posn = irq %= 32;
return 0;
}
@@ -141,6 +125,7 @@ static void wakeupgen_mask(struct irq_data *d)
raw_spin_lock_irqsave(&wakeupgen_lock, flags);
_wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
+ irq_chip_mask_parent(d);
}
/*
@@ -153,6 +138,7 @@ static void wakeupgen_unmask(struct irq_data *d)
raw_spin_lock_irqsave(&wakeupgen_lock, flags);
_wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
+ irq_chip_unmask_parent(d);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -400,15 +386,91 @@ int omap_secure_apis_support(void)
return omap_secure_apis;
}
+static struct irq_chip wakeupgen_chip = {
+ .name = "WUGEN",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = wakeupgen_mask,
+ .irq_unmask = wakeupgen_unmask,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int wakeupgen_domain_xlate(struct irq_domain *domain,
+ struct device_node *controller,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (domain->of_node != controller)
+ return -EINVAL; /* Shouldn't happen, really... */
+ if (intsize != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (intspec[0] != 0)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ *out_hwirq = intspec[1];
+ *out_type = intspec[2];
+ return 0;
+}
+
+static int wakeupgen_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct of_phandle_args *args = data;
+ struct of_phandle_args parent_args;
+ irq_hw_number_t hwirq;
+ int i;
+
+ if (args->args_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (args->args[0] != 0)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ hwirq = args->args[1];
+ if (hwirq >= MAX_IRQS)
+ return -EINVAL; /* Can't deal with this */
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &wakeupgen_chip, NULL);
+
+ parent_args = *args;
+ parent_args.np = domain->parent->of_node;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args);
+}
+
+static struct irq_domain_ops wakeupgen_domain_ops = {
+ .xlate = wakeupgen_domain_xlate,
+ .alloc = wakeupgen_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
/*
* Initialise the wakeupgen module.
*/
-int __init omap_wakeupgen_init(void)
+static int __init wakeupgen_init(struct device_node *node,
+ struct device_node *parent)
{
+ struct irq_domain *parent_domain, *domain;
int i;
unsigned int boot_cpu = smp_processor_id();
u32 val;
+ if (!parent) {
+ pr_err("%s: no parent, giving up\n", node->full_name);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%s: unable to obtain parent domain\n", node->full_name);
+ return -ENXIO;
+ }
/* Not supported on OMAP4 ES1.0 silicon */
if (omap_rev() == OMAP4430_REV_ES1_0) {
WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
@@ -416,7 +478,7 @@ int __init omap_wakeupgen_init(void)
}
/* Static mapping, never released */
- wakeupgen_base = ioremap(OMAP_WKUPGEN_BASE, SZ_4K);
+ wakeupgen_base = of_iomap(node, 0);
if (WARN_ON(!wakeupgen_base))
return -ENOMEM;
@@ -429,6 +491,14 @@ int __init omap_wakeupgen_init(void)
max_irqs = AM43XX_IRQS;
}
+ domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
+ node, &wakeupgen_domain_ops,
+ NULL);
+ if (!domain) {
+ iounmap(wakeupgen_base);
+ return -ENOMEM;
+ }
+
/* Clear all IRQ bitmasks at wakeupGen level */
for (i = 0; i < irq_banks; i++) {
wakeupgen_writel(0, i, CPU0_ID);
@@ -437,14 +507,6 @@ int __init omap_wakeupgen_init(void)
}
/*
- * Override GIC architecture specific functions to add
- * OMAP WakeupGen interrupt controller along with GIC
- */
- gic_arch_extn.irq_mask = wakeupgen_mask;
- gic_arch_extn.irq_unmask = wakeupgen_unmask;
- gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
-
- /*
* FIXME: Add support to set_smp_affinity() once the core
* GIC code has necessary hooks in place.
*/
@@ -474,3 +536,9 @@ int __init omap_wakeupgen_init(void)
return 0;
}
+
+/*
+ * We cannot use the IRQCHIP_DECLARE macro that lives in
+ * drivers/irqchip, so we're forced to roll our own. Not very nice.
+ */
+OF_DECLARE_2(irqchip, ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);
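
With the GIC offset handling gone, _wakeupgen_get_irq_info() above is pure bit
arithmetic: each 32-bit WakeupGen register covers 32 SPIs. A standalone,
runnable illustration of the same split:

        #include <stdio.h>

        int main(void)
        {
                unsigned int spi = 75;                  /* example SPI number */
                unsigned int reg_index = spi >> 5;      /* spi / 32 -> register 2 */
                unsigned int bit_posn = spi & 0x1f;     /* spi % 32 -> bit 11 */

                printf("SPI %u -> register %u, bit %u\n", spi, reg_index, bit_posn);
                return 0;
        }
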
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.h b/arch/arm/mach-omap2/omap-wakeupgen.h
index b3c8ecc..a3491ad 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.h
+++ b/arch/arm/mach-omap2/omap-wakeupgen.h
@@ -33,7 +33,6 @@
#define OMAP_TIMESTAMPCYCLELO 0xc08
#define OMAP_TIMESTAMPCYCLEHI 0xc0c
-extern int __init omap_wakeupgen_init(void);
extern void __iomem *omap_get_wakeupgen_base(void);
extern int omap_secure_apis_support(void);
#endif
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index cee0fe1..7bb116a 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -22,7 +22,6 @@
#include <linux/of_platform.h>
#include <linux/export.h>
#include <linux/irqchip/arm-gic.h>
-#include <linux/irqchip/irq-crossbar.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
#include <linux/genalloc.h>
@@ -242,26 +241,26 @@ static int __init omap4_sar_ram_init(void)
}
omap_early_initcall(omap4_sar_ram_init);
-static const struct of_device_id gic_match[] = {
- { .compatible = "arm,cortex-a9-gic", },
- { .compatible = "arm,cortex-a15-gic", },
+static const struct of_device_id intc_match[] = {
+ { .compatible = "ti,omap4-wugen-mpu", },
+ { .compatible = "ti,omap5-wugen-mpu", },
{ },
};
-static struct device_node *gic_node;
+static struct device_node *intc_node;
unsigned int omap4_xlate_irq(unsigned int hwirq)
{
struct of_phandle_args irq_data;
unsigned int irq;
- if (!gic_node)
- gic_node = of_find_matching_node(NULL, gic_match);
+ if (!intc_node)
+ intc_node = of_find_matching_node(NULL, intc_match);
- if (WARN_ON(!gic_node))
+ if (WARN_ON(!intc_node))
return hwirq;
- irq_data.np = gic_node;
+ irq_data.np = intc_node;
irq_data.args_count = 3;
irq_data.args[0] = 0;
irq_data.args[1] = hwirq - OMAP44XX_IRQ_GIC_START;
@@ -278,6 +277,12 @@ void __init omap_gic_of_init(void)
{
struct device_node *np;
+ intc_node = of_find_matching_node(NULL, intc_match);
+ if (WARN_ON(!intc_node)) {
+ pr_err("No WUGEN found in DT, system will misbehave.\n");
+ pr_err("UPDATE YOUR DEVICE TREE!\n");
+ }
+
/* Extract GIC distributor and TWD bases for OMAP4460 ROM Errata WA */
if (!cpu_is_omap446x())
goto skip_errata_init;
@@ -291,9 +296,5 @@ void __init omap_gic_of_init(void)
WARN_ON(!twd_base);
skip_errata_init:
- omap_wakeupgen_init();
-#ifdef CONFIG_IRQ_CROSSBAR
- irqcrossbar_init();
-#endif
irqchip_init();
}
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index 87a12d6..b02f394 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -540,37 +540,33 @@ void __init orion5x_pci_set_cardbus_mode(void)
int __init orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys)
{
- int ret = 0;
-
vga_base = ORION5X_PCIE_MEM_PHYS_BASE;
if (nr == 0) {
orion_pcie_set_local_bus_nr(PCIE_BASE, sys->busnr);
- ret = pcie_setup(sys);
- } else if (nr == 1 && !orion5x_pci_disabled) {
+ return pcie_setup(sys);
+ }
+
+ if (nr == 1 && !orion5x_pci_disabled) {
orion5x_pci_set_bus_nr(sys->busnr);
- ret = pci_setup(sys);
+ return pci_setup(sys);
}
- return ret;
+ return 0;
}
struct pci_bus __init *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys)
{
- struct pci_bus *bus;
+ if (nr == 0)
+ return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
+ &sys->resources);
- if (nr == 0) {
- bus = pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
- &sys->resources);
- } else if (nr == 1 && !orion5x_pci_disabled) {
- bus = pci_scan_root_bus(NULL, sys->busnr, &pci_ops, sys,
- &sys->resources);
- } else {
- bus = NULL;
- BUG();
- }
+ if (nr == 1 && !orion5x_pci_disabled)
+ return pci_scan_root_bus(NULL, sys->busnr, &pci_ops, sys,
+ &sys->resources);
- return bus;
+ BUG();
+ return NULL;
}
int __init orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 0eecd83..89a7c06 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -11,6 +11,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -40,7 +41,6 @@
#define ICHP_VAL_IRQ (1 << 31)
#define ICHP_IRQ(i) (((i) >> 16) & 0x7fff)
#define IPR_VALID (1 << 31)
-#define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f)
#define MAX_INTERNAL_IRQS 128
@@ -51,6 +51,7 @@
static void __iomem *pxa_irq_base;
static int pxa_internal_irq_nr;
static bool cpu_has_ipr;
+static struct irq_domain *pxa_irq_domain;
static inline void __iomem *irq_base(int i)
{
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
void pxa_mask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t irq = irqd_to_hwirq(d);
uint32_t icmr = __raw_readl(base + ICMR);
- icmr &= ~(1 << IRQ_BIT(d->irq));
+ icmr &= ~BIT(irq & 0x1f);
__raw_writel(icmr, base + ICMR);
}
void pxa_unmask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t irq = irqd_to_hwirq(d);
uint32_t icmr = __raw_readl(base + ICMR);
- icmr |= 1 << IRQ_BIT(d->irq);
+ icmr |= BIT(irq & 0x1f);
__raw_writel(icmr, base + ICMR);
}
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
} while (1);
}
-void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
{
- int irq, i, n;
+ void __iomem *base = irq_base(hw / 32);
- BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+ /* initialize interrupt priority */
+ if (cpu_has_ipr)
+ __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
+
+ irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(virq, base);
+ set_irq_flags(virq, IRQF_VALID);
+
+ return 0;
+}
+
+static struct irq_domain_ops pxa_irq_ops = {
+ .map = pxa_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static __init void
+pxa_init_irq_common(struct device_node *node, int irq_nr,
+ int (*fn)(struct irq_data *, unsigned int))
+{
+ int n;
pxa_internal_irq_nr = irq_nr;
- cpu_has_ipr = !cpu_is_pxa25x();
- pxa_irq_base = io_p2v(0x40d00000);
+ pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
+ PXA_IRQ(0), 0,
+ &pxa_irq_ops, NULL);
+ if (!pxa_irq_domain)
+ panic("Unable to add PXA IRQ domain\n");
+ irq_set_default_host(pxa_irq_domain);
for (n = 0; n < irq_nr; n += 32) {
void __iomem *base = irq_base(n >> 5);
__raw_writel(0, base + ICMR); /* disable all IRQs */
__raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
- for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
- /* initialize interrupt priority */
- if (cpu_has_ipr)
- __raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
-
- irq = PXA_IRQ(i);
- irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
- handle_level_irq);
- irq_set_chip_data(irq, base);
- set_irq_flags(irq, IRQF_VALID);
- }
}
-
/* only unmasked interrupts kick us out of idle */
__raw_writel(1, irq_base(0) + ICCR);
pxa_internal_irq_chip.irq_set_wake = fn;
}
+void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+{
+ BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+
+ pxa_irq_base = io_p2v(0x40d00000);
+ cpu_has_ipr = !cpu_is_pxa25x();
+ pxa_init_irq_common(NULL, irq_nr, fn);
+}
+
#ifdef CONFIG_PM
static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
};
#ifdef CONFIG_OF
-static struct irq_domain *pxa_irq_domain;
-
-static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- void __iomem *base = irq_base(hw / 32);
-
- /* initialize interrupt priority */
- if (cpu_has_ipr)
- __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
-
- irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
- handle_level_irq);
- irq_set_chip_data(hw, base);
- set_irq_flags(hw, IRQF_VALID);
-
- return 0;
-}
-
-static struct irq_domain_ops pxa_irq_ops = {
- .map = pxa_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
static const struct of_device_id intc_ids[] __initconst = {
{ .compatible = "marvell,pxa-intc", },
{}
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
{
struct device_node *node;
struct resource res;
- int n, ret;
+ int ret;
node = of_find_matching_node(NULL, intc_ids);
if (!node) {
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
return;
}
- pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0,
- &pxa_irq_ops, NULL);
- if (!pxa_irq_domain)
- panic("Unable to add PXA IRQ domain\n");
-
- irq_set_default_host(pxa_irq_domain);
-
- for (n = 0; n < pxa_internal_irq_nr; n += 32) {
- void __iomem *base = irq_base(n >> 5);
-
- __raw_writel(0, base + ICMR); /* disable all IRQs */
- __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
- }
-
- /* only unmasked interrupts kick us out of idle */
- __raw_writel(1, irq_base(0) + ICCR);
-
- pxa_internal_irq_chip.irq_set_wake = fn;
+ pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
}
#endif /* CONFIG_OF */
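
The PXA rework above folds the DT and non-DT probe paths into
pxa_init_irq_common(): a legacy domain binds hwirqs 0..n-1 onto a fixed virq
base, and per-IRQ setup moves into the ->map() hook so both paths share it.
The skeleton of that arrangement, with invented "example_" names:

        #include <linux/irq.h>
        #include <linux/irqdomain.h>

        static struct irq_chip example_chip = {
                .name = "EXAMPLE",
        };

        static int example_map(struct irq_domain *d, unsigned int virq,
                               irq_hw_number_t hw)
        {
                /* per-IRQ setup runs here for both probe paths */
                irq_set_chip_and_handler(virq, &example_chip, handle_level_irq);
                return 0;
        }

        static const struct irq_domain_ops example_ops = {
                .map    = example_map,
                .xlate  = irq_domain_xlate_onecell,
        };

        /* node is NULL on the non-DT path, the DT node otherwise */
        static void __init example_init(struct device_node *node, int nr,
                                        int virq_base)
        {
                struct irq_domain *d = irq_domain_add_legacy(node, nr, virq_base,
                                                             0, &example_ops,
                                                             NULL);
                if (!d)
                        panic("example: unable to add IRQ domain\n");
                irq_set_default_host(d);
        }
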
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index a762b23..6dc4f02 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -758,8 +758,10 @@ static void raumfeld_power_signal_charged(void)
struct power_supply *psy =
power_supply_get_by_name(raumfeld_power_supplicants[0]);
- if (psy)
+ if (psy) {
power_supply_set_battery_charged(psy);
+ power_supply_put(psy);
+ }
}
static int raumfeld_power_resume(void)
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 205f9bf..ac2ae5c 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
};
static struct platform_device can_regulator_device = {
- .name = "reg-fixed-volage",
+ .name = "reg-fixed-voltage",
.id = 0,
.dev = {
.platform_data = &can_regulator_pdata,
diff --git a/arch/arm/mach-s3c64xx/cpuidle.c b/arch/arm/mach-s3c64xx/cpuidle.c
index 2eb0724..93aa8cb 100644
--- a/arch/arm/mach-s3c64xx/cpuidle.c
+++ b/arch/arm/mach-s3c64xx/cpuidle.c
@@ -16,7 +16,7 @@
#include <linux/export.h>
#include <linux/time.h>
-#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
#include <mach/map.h>
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index 9e36180..fd63ae6 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -252,11 +252,6 @@ static irqreturn_t sh73a0_intcs_demux(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
-{
- return 0; /* always allow wakeup */
-}
-
#define PINTER0_PHYS 0xe69000a0
#define PINTER1_PHYS 0xe69000a4
#define PINTER0_VIRT IOMEM(0xe69000a0)
@@ -318,8 +313,8 @@ void __init sh73a0_init_irq(void)
void __iomem *gic_cpu_base = IOMEM(0xf0000100);
void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
+ gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE);
gic_init(0, 29, gic_dist_base, gic_cpu_base);
- gic_arch_extn.irq_set_wake = sh73a0_set_wake;
register_intc_controller(&intcs_desc);
register_intc_controller(&intc_pint0_desc);
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 27dceaf9..c03e562 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -713,18 +713,13 @@ void __init r8a7779_init_late(void)
}
#ifdef CONFIG_USE_OF
-static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
-{
- return 0; /* always allow wakeup */
-}
-
void __init r8a7779_init_irq_dt(void)
{
#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
void __iomem *gic_dist_base = ioremap_nocache(0xf0001000, 0x1000);
void __iomem *gic_cpu_base = ioremap_nocache(0xf0000100, 0x1000);
#endif
- gic_arch_extn.irq_set_wake = r8a7779_set_wake;
+ gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE);
#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
gic_init(0, 29, gic_dist_base, gic_cpu_base);
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index a77604f..81502b9 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -1,10 +1,12 @@
menuconfig ARCH_SUNXI
bool "Allwinner SoCs" if ARCH_MULTI_V7
select ARCH_REQUIRE_GPIOLIB
+ select ARCH_HAS_RESET_CONTROLLER
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select PINCTRL
select SUN4I_TIMER
+ select RESET_CONTROLLER
if ARCH_SUNXI
@@ -20,10 +22,8 @@ config MACH_SUN5I
config MACH_SUN6I
bool "Allwinner A31 (sun6i) SoCs support"
default ARCH_SUNXI
- select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select MFD_SUN6I_PRCM
- select RESET_CONTROLLER
select SUN5I_HSTIMER
config MACH_SUN7I
@@ -37,16 +37,12 @@ config MACH_SUN7I
config MACH_SUN8I
bool "Allwinner A23 (sun8i) SoCs support"
default ARCH_SUNXI
- select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select MFD_SUN6I_PRCM
- select RESET_CONTROLLER
config MACH_SUN9I
bool "Allwinner (sun9i) SoCs support"
default ARCH_SUNXI
- select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
- select RESET_CONTROLLER
endif
diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c
index f2b586d..155807f 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra114.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra114.c
@@ -15,7 +15,7 @@
*/
#include <asm/firmware.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
@@ -44,7 +44,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev,
tegra_set_cpu_in_lp2();
cpu_pm_enter();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+ tick_broadcast_enter();
call_firmware_op(prepare_idle);
@@ -52,7 +52,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev,
if (call_firmware_op(do_idle, 0) == -ENOSYS)
cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+ tick_broadcast_exit();
cpu_pm_exit();
tegra_clear_cpu_in_lp2();
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 4f25a7c..88de2dc 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -20,14 +20,13 @@
*/
#include <linux/clk/tegra.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
@@ -136,11 +135,11 @@ static bool tegra20_cpu_cluster_power_down(struct cpuidle_device *dev,
if (tegra20_reset_cpu_1() || !tegra_cpu_rail_off_ready())
return false;
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+ tick_broadcast_enter();
tegra_idle_lp2_last();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+ tick_broadcast_exit();
if (cpu_online(1))
tegra20_wake_cpu1_from_reset();
@@ -153,13 +152,13 @@ static bool tegra20_idle_enter_lp2_cpu_1(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+ tick_broadcast_enter();
cpu_suspend(0, tegra20_sleep_cpu_secondary_finish);
tegra20_cpu_clear_resettable();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+ tick_broadcast_exit();
return true;
}
diff --git a/arch/arm/mach-tegra/cpuidle-tegra30.c b/arch/arm/mach-tegra/cpuidle-tegra30.c
index f8815ed..4dbe1da 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra30.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra30.c
@@ -20,14 +20,13 @@
*/
#include <linux/clk/tegra.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
@@ -76,11 +75,11 @@ static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev,
return false;
}
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+ tick_broadcast_enter();
tegra_idle_lp2_last();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+ tick_broadcast_exit();
return true;
}
@@ -90,13 +89,13 @@ static bool tegra30_cpu_core_power_down(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+ tick_broadcast_enter();
smp_wmb();
cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+ tick_broadcast_exit();
return true;
}
diff --git a/arch/arm/mach-tegra/iomap.h b/arch/arm/mach-tegra/iomap.h
index ee79808..81dc950 100644
--- a/arch/arm/mach-tegra/iomap.h
+++ b/arch/arm/mach-tegra/iomap.h
@@ -31,21 +31,6 @@
#define TEGRA_ARM_INT_DIST_BASE 0x50041000
#define TEGRA_ARM_INT_DIST_SIZE SZ_4K
-#define TEGRA_PRIMARY_ICTLR_BASE 0x60004000
-#define TEGRA_PRIMARY_ICTLR_SIZE SZ_64
-
-#define TEGRA_SECONDARY_ICTLR_BASE 0x60004100
-#define TEGRA_SECONDARY_ICTLR_SIZE SZ_64
-
-#define TEGRA_TERTIARY_ICTLR_BASE 0x60004200
-#define TEGRA_TERTIARY_ICTLR_SIZE SZ_64
-
-#define TEGRA_QUATERNARY_ICTLR_BASE 0x60004300
-#define TEGRA_QUATERNARY_ICTLR_SIZE SZ_64
-
-#define TEGRA_QUINARY_ICTLR_BASE 0x60004400
-#define TEGRA_QUINARY_ICTLR_SIZE SZ_64
-
#define TEGRA_TMR1_BASE 0x60005000
#define TEGRA_TMR1_SIZE SZ_8
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index ab95f53..3b9098d 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -30,43 +30,9 @@
#include "board.h"
#include "iomap.h"
-#define ICTLR_CPU_IEP_VFIQ 0x08
-#define ICTLR_CPU_IEP_FIR 0x14
-#define ICTLR_CPU_IEP_FIR_SET 0x18
-#define ICTLR_CPU_IEP_FIR_CLR 0x1c
-
-#define ICTLR_CPU_IER 0x20
-#define ICTLR_CPU_IER_SET 0x24
-#define ICTLR_CPU_IER_CLR 0x28
-#define ICTLR_CPU_IEP_CLASS 0x2C
-
-#define ICTLR_COP_IER 0x30
-#define ICTLR_COP_IER_SET 0x34
-#define ICTLR_COP_IER_CLR 0x38
-#define ICTLR_COP_IEP_CLASS 0x3c
-
-#define FIRST_LEGACY_IRQ 32
-#define TEGRA_MAX_NUM_ICTLRS 5
-
#define SGI_MASK 0xFFFF
-static int num_ictlrs;
-
-static void __iomem *ictlr_reg_base[] = {
- IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE),
- IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE),
- IO_ADDRESS(TEGRA_TERTIARY_ICTLR_BASE),
- IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE),
- IO_ADDRESS(TEGRA_QUINARY_ICTLR_BASE),
-};
-
#ifdef CONFIG_PM_SLEEP
-static u32 cop_ier[TEGRA_MAX_NUM_ICTLRS];
-static u32 cop_iep[TEGRA_MAX_NUM_ICTLRS];
-static u32 cpu_ier[TEGRA_MAX_NUM_ICTLRS];
-static u32 cpu_iep[TEGRA_MAX_NUM_ICTLRS];
-
-static u32 ictlr_wake_mask[TEGRA_MAX_NUM_ICTLRS];
static void __iomem *tegra_gic_cpu_base;
#endif
@@ -83,140 +49,7 @@ bool tegra_pending_sgi(void)
return false;
}
-static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg)
-{
- void __iomem *base;
- u32 mask;
-
- BUG_ON(irq < FIRST_LEGACY_IRQ ||
- irq >= FIRST_LEGACY_IRQ + num_ictlrs * 32);
-
- base = ictlr_reg_base[(irq - FIRST_LEGACY_IRQ) / 32];
- mask = BIT((irq - FIRST_LEGACY_IRQ) % 32);
-
- __raw_writel(mask, base + reg);
-}
-
-static void tegra_mask(struct irq_data *d)
-{
- if (d->hwirq < FIRST_LEGACY_IRQ)
- return;
-
- tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_CLR);
-}
-
-static void tegra_unmask(struct irq_data *d)
-{
- if (d->hwirq < FIRST_LEGACY_IRQ)
- return;
-
- tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_SET);
-}
-
-static void tegra_ack(struct irq_data *d)
-{
- if (d->hwirq < FIRST_LEGACY_IRQ)
- return;
-
- tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
-}
-
-static void tegra_eoi(struct irq_data *d)
-{
- if (d->hwirq < FIRST_LEGACY_IRQ)
- return;
-
- tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
-}
-
-static int tegra_retrigger(struct irq_data *d)
-{
- if (d->hwirq < FIRST_LEGACY_IRQ)
- return 0;
-
- tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_SET);
-
- return 1;
-}
-
#ifdef CONFIG_PM_SLEEP
-static int tegra_set_wake(struct irq_data *d, unsigned int enable)
-{
- u32 irq = d->hwirq;
- u32 index, mask;
-
- if (irq < FIRST_LEGACY_IRQ ||
- irq >= FIRST_LEGACY_IRQ + num_ictlrs * 32)
- return -EINVAL;
-
- index = ((irq - FIRST_LEGACY_IRQ) / 32);
- mask = BIT((irq - FIRST_LEGACY_IRQ) % 32);
- if (enable)
- ictlr_wake_mask[index] |= mask;
- else
- ictlr_wake_mask[index] &= ~mask;
-
- return 0;
-}
-
-static int tegra_legacy_irq_suspend(void)
-{
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for (i = 0; i < num_ictlrs; i++) {
- void __iomem *ictlr = ictlr_reg_base[i];
- /* Save interrupt state */
- cpu_ier[i] = readl_relaxed(ictlr + ICTLR_CPU_IER);
- cpu_iep[i] = readl_relaxed(ictlr + ICTLR_CPU_IEP_CLASS);
- cop_ier[i] = readl_relaxed(ictlr + ICTLR_COP_IER);
- cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS);
-
- /* Disable COP interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
-
- /* Disable CPU interrupts */
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
-
- /* Enable the wakeup sources of ictlr */
- writel_relaxed(ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET);
- }
- local_irq_restore(flags);
-
- return 0;
-}
-
-static void tegra_legacy_irq_resume(void)
-{
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for (i = 0; i < num_ictlrs; i++) {
- void __iomem *ictlr = ictlr_reg_base[i];
- writel_relaxed(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR);
- writel_relaxed(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET);
- writel_relaxed(cop_iep[i], ictlr + ICTLR_COP_IEP_CLASS);
- writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR);
- writel_relaxed(cop_ier[i], ictlr + ICTLR_COP_IER_SET);
- }
- local_irq_restore(flags);
-}
-
-static struct syscore_ops tegra_legacy_irq_syscore_ops = {
- .suspend = tegra_legacy_irq_suspend,
- .resume = tegra_legacy_irq_resume,
-};
-
-int tegra_legacy_irq_syscore_init(void)
-{
- register_syscore_ops(&tegra_legacy_irq_syscore_ops);
-
- return 0;
-}
-
static int tegra_gic_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
@@ -251,45 +84,19 @@ static void tegra114_gic_cpu_pm_registration(void)
cpu_pm_register_notifier(&tegra_gic_notifier_block);
}
#else
-#define tegra_set_wake NULL
static void tegra114_gic_cpu_pm_registration(void) { }
#endif
+static const struct of_device_id tegra_ictlr_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-ictlr" },
+ { .compatible = "nvidia,tegra30-ictlr" },
+ { }
+};
+
void __init tegra_init_irq(void)
{
- int i;
- void __iomem *distbase;
-
- distbase = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);
- num_ictlrs = readl_relaxed(distbase + GIC_DIST_CTR) & 0x1f;
-
- if (num_ictlrs > ARRAY_SIZE(ictlr_reg_base)) {
- WARN(1, "Too many (%d) interrupt controllers found. Maximum is %d.",
- num_ictlrs, ARRAY_SIZE(ictlr_reg_base));
- num_ictlrs = ARRAY_SIZE(ictlr_reg_base);
- }
-
- for (i = 0; i < num_ictlrs; i++) {
- void __iomem *ictlr = ictlr_reg_base[i];
- writel(~0, ictlr + ICTLR_CPU_IER_CLR);
- writel(0, ictlr + ICTLR_CPU_IEP_CLASS);
- }
-
- gic_arch_extn.irq_ack = tegra_ack;
- gic_arch_extn.irq_eoi = tegra_eoi;
- gic_arch_extn.irq_mask = tegra_mask;
- gic_arch_extn.irq_unmask = tegra_unmask;
- gic_arch_extn.irq_retrigger = tegra_retrigger;
- gic_arch_extn.irq_set_wake = tegra_set_wake;
- gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND;
-
- /*
- * Check if there is a devicetree present, since the GIC will be
- * initialized elsewhere under DT.
- */
- if (!of_have_populated_dt())
- gic_init(0, 29, distbase,
- IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
+ if (WARN_ON(!of_find_matching_node(NULL, tegra_ictlr_match)))
+ pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
tegra114_gic_cpu_pm_registration();
}
diff --git a/arch/arm/mach-tegra/irq.h b/arch/arm/mach-tegra/irq.h
index bc05ce5..5142649 100644
--- a/arch/arm/mach-tegra/irq.h
+++ b/arch/arm/mach-tegra/irq.h
@@ -19,10 +19,4 @@
bool tegra_pending_sgi(void);
-#ifdef CONFIG_PM_SLEEP
-int tegra_legacy_irq_syscore_init(void);
-#else
-static inline int tegra_legacy_irq_syscore_init(void) { return 0; }
-#endif
-
#endif
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index 914341b..861d884 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -82,7 +82,6 @@ static void __init tegra_dt_init_irq(void)
{
tegra_init_irq();
irqchip_init();
- tegra_legacy_irq_syscore_init();
}
static void __init tegra_dt_init(void)
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index dbb2970..6ced0f6 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -52,7 +52,7 @@ void ux500_restart(enum reboot_mode mode, const char *cmd)
*/
void __init ux500_init_irq(void)
{
- gic_arch_extn.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
+ gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND);
irqchip_init();
/*
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index c887196..58ef2a7 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -186,7 +186,7 @@ static void __init zynq_map_io(void)
static void __init zynq_irq_init(void)
{
- gic_arch_extn.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
+ gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND);
irqchip_init();
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 3866f81..09c5fe3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2057,6 +2057,13 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
if (!iommu)
return false;
+ /*
+ * arm_iommu_create_mapping() currently takes at most a size_t
+ * for its size parameter, so check that limit for now.
+ */
+ if (size > SIZE_MAX)
+ return false;
+
mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
if (IS_ERR(mapping)) {
pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ae369c1..be92fa0 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -286,6 +286,9 @@ void __init bootmem_init(void)
find_limits(&min, &max_low, &max_high);
+ early_memtest((phys_addr_t)min << PAGE_SHIFT,
+ (phys_addr_t)max_low << PAGE_SHIFT);
+
/*
* Sparsemem tries to allocate bootmem in memory_present(),
* so must be done after the fixed reservations
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 5e85ed3..407dc78 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -169,14 +169,22 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
}
+unsigned long arch_mmap_rnd(void)
+{
+ unsigned long rnd;
+
+ /* 8 bits of randomness in 20 address space bits */
+ rnd = (unsigned long)get_random_int() % (1 << 8);
+
+ return rnd << PAGE_SHIFT;
+}
+
void arch_pick_mmap_layout(struct mm_struct *mm)
{
unsigned long random_factor = 0UL;
- /* 8 bits of randomness in 20 address space bits */
- if ((current->flags & PF_RANDOMIZE) &&
- !(current->personality & ADDR_NO_RANDOMIZE))
- random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 61b4d70..2438b96 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -44,24 +44,20 @@ static u64 notrace omap_32k_read_sched_clock(void)
}
/**
- * omap_read_persistent_clock - Return time from a persistent clock.
+ * omap_read_persistent_clock64 - Return time from a persistent clock.
*
* Reads the time from a source which isn't disabled during PM, the
* 32k sync timer. Convert the cycles elapsed since last read into
- * nsecs and adds to a monotonically increasing timespec.
+ * nsecs and adds to a monotonically increasing timespec64.
*/
-static struct timespec persistent_ts;
+static struct timespec64 persistent_ts;
static cycles_t cycles;
static unsigned int persistent_mult, persistent_shift;
-static DEFINE_SPINLOCK(read_persistent_clock_lock);
-static void omap_read_persistent_clock(struct timespec *ts)
+static void omap_read_persistent_clock64(struct timespec64 *ts)
{
unsigned long long nsecs;
cycles_t last_cycles;
- unsigned long flags;
-
- spin_lock_irqsave(&read_persistent_clock_lock, flags);
last_cycles = cycles;
cycles = sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;
@@ -69,11 +65,9 @@ static void omap_read_persistent_clock(struct timespec *ts)
nsecs = clocksource_cyc2ns(cycles - last_cycles,
persistent_mult, persistent_shift);
- timespec_add_ns(&persistent_ts, nsecs);
+ timespec64_add_ns(&persistent_ts, nsecs);
*ts = persistent_ts;
-
- spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
}
/**
@@ -103,7 +97,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
/*
* 120000 rough estimate from the calculations in
- * __clocksource_updatefreq_scale.
+ * __clocksource_update_freq_scale.
*/
clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
32768, NSEC_PER_SEC, 120000);
@@ -116,7 +110,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
- register_persistent_clock(NULL, omap_read_persistent_clock);
+ register_persistent_clock(NULL, omap_read_persistent_clock64);
pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
return 0;
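
clocksource_cyc2ns(), used by omap_read_persistent_clock64() above, is a
fixed-point multiply: ns = (cycles * mult) >> shift. For a 32768 Hz counter an
exact pair happens to exist (mult = 10^9 * 2^8 / 32768 = 7812500, shift = 8);
the values clocks_calc_mult_shift() actually picks may differ, so treat these
as illustrative only:

        #include <stdint.h>
        #include <stdio.h>

        static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
        {
                return (cycles * mult) >> shift;
        }

        int main(void)
        {
                uint32_t mult = 7812500, shift = 8;  /* exact for 32768 Hz */

                /* one full second of 32 kHz cycles -> 1000000000 ns */
                printf("%llu ns\n",
                       (unsigned long long)cyc2ns(32768, mult, shift));
                return 0;
        }
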
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index db10169..8ca94d3 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct of_device_id *match;
const struct dmtimer_platform_data *pdata;
+ int ret;
match = of_match_device(of_match_ptr(omap_timer_match), dev);
pdata = match ? match->data : dev->platform_data;
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
}
if (!timer->reserved) {
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
+ __func__);
+ goto err_get_sync;
+ }
__omap_dm_timer_init_regs(timer);
pm_runtime_put(dev);
}
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
dev_dbg(dev, "Device Probed.\n");
return 0;
+
+err_get_sync:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
}
/**
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
}
spin_unlock_irqrestore(&dm_timer_lock, flags);
+ pm_runtime_disable(&pdev->dev);
+
return ret;
}
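
The err_get_sync path added above follows the usual runtime-PM rule:
pm_runtime_get_sync() raises the device usage count even when it fails, so the
unwind must drop that reference with pm_runtime_put_noidle() before disabling
runtime PM. The shape in isolation (the "example_" name is invented):

        #include <linux/pm_runtime.h>

        static int example_touch_hw(struct device *dev)
        {
                int ret = pm_runtime_get_sync(dev);

                if (ret < 0) {
                        pm_runtime_put_noidle(dev);  /* drop the ref taken above */
                        pm_runtime_disable(dev);
                        return ret;
                }

                /* ... access the device registers ... */

                pm_runtime_put(dev);
                return 0;
        }
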
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1b8e973..34f487d 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,7 +1,7 @@
config ARM64
def_bool y
- select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -143,6 +143,13 @@ config KERNEL_MODE_NEON
config FIX_EARLYCON_MEM
def_bool y
+config PGTABLE_LEVELS
+ int
+ default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
+ default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48
+ default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
+ default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -413,13 +420,6 @@ config ARM64_VA_BITS
default 42 if ARM64_VA_BITS_42
default 48 if ARM64_VA_BITS_48
-config ARM64_PGTABLE_LEVELS
- int
- default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
- default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48
- default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
- default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48
-
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
help
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
index ea2b566..c9b89ef 100644
--- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
@@ -8,7 +8,7 @@
*/
/* SoC fixed clocks */
- soc_uartclk: refclk72738khz {
+ soc_uartclk: refclk7273800hz {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <7273800>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index be1f12a..af6a452 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -48,7 +48,7 @@ CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
CONFIG_CPU_IDLE=y
-CONFIG_ARM64_CPUIDLE=y
+CONFIG_ARM_CPUIDLE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index cb95930..d8c25b7 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
__ret; \
})
-#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
- cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
- o1, o2, n1, n2)
+#define _protect_cmpxchg_local(pcp, o, n) \
+({ \
+ typeof(*raw_cpu_ptr(&(pcp))) __ret; \
+ preempt_disable(); \
+ __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
+ preempt_enable(); \
+ __ret; \
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+ int __ret; \
+ preempt_disable(); \
+ __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
+ raw_cpu_ptr(&(ptr2)), \
+ o1, o2, n1, n2); \
+ preempt_enable(); \
+ __ret; \
+})
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
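
The point of _protect_cmpxchg_local() above is the window between resolving
raw_cpu_ptr() and running cmpxchg_local(): a preemptible task could migrate
inside it and end up operating on the old CPU's per-cpu slot. The wrapper,
restated with the race called out (a sketch, not the kernel macro itself):

        #define example_protect_cmpxchg(pcp, o, n)                      \
        ({                                                              \
                typeof(*raw_cpu_ptr(&(pcp))) __ret;                     \
                preempt_disable();      /* pin to the current CPU... */ \
                __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);       \
                preempt_enable();       /* ...until cmpxchg is done */  \
                __ret;                                                  \
        })
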
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index c60643f..141b2fc 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -4,10 +4,10 @@
#include <asm/proc-fns.h>
#ifdef CONFIG_CPU_IDLE
-extern int cpu_init_idle(unsigned int cpu);
+extern int arm_cpuidle_init(unsigned int cpu);
extern int cpu_suspend(unsigned long arg);
#else
-static inline int cpu_init_idle(unsigned int cpu)
+static inline int arm_cpuidle_init(unsigned int cpu)
{
return -EOPNOTSUPP;
}
@@ -17,5 +17,8 @@ static inline int cpu_suspend(unsigned long arg)
return -EOPNOTSUPP;
}
#endif
-
+static inline int arm_cpuidle_suspend(int index)
+{
+ return cpu_suspend(index);
+}
#endif
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 1f65be3..faad6df 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -125,7 +125,6 @@ typedef struct user_fpsimd_state elf_fpregset_t;
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
-extern unsigned long randomize_et_dyn(unsigned long base);
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
/*
@@ -157,10 +156,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
#ifdef CONFIG_COMPAT
#ifdef __AARCH64EB__
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 92bbae3..7052245 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -90,6 +90,7 @@
#define ESR_ELx_FSC (0x3F)
#define ESR_ELx_FSC_TYPE (0x3C)
#define ESR_ELx_FSC_EXTABT (0x10)
+#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
#define ESR_ELx_FSC_PERM (0x0C)
#define ESR_ELx_CV (UL(1) << 24)
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 076a1c7..c0e5165 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -18,11 +18,12 @@
*/
#ifndef __ASM_JUMP_LABEL_H
#define __ASM_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#include <asm/insn.h>
-#ifdef __KERNEL__
-
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
static __always_inline bool arch_static_branch(struct static_key *key)
@@ -39,8 +40,6 @@ l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
typedef u64 jump_label_t;
struct jump_entry {
@@ -49,4 +48,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_JUMP_LABEL_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 94674eb..ac6fafb 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -129,6 +129,9 @@
* 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
* not known to exist and will break with this configuration.
*
+ * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time
+ * (see hyp-init.S).
+ *
* Note that when using 4K pages, we concatenate two first level page tables
* together.
*
@@ -138,7 +141,6 @@
#ifdef CONFIG_ARM64_64K_PAGES
/*
* Stage2 translation configuration:
- * 40bits output (PS = 2)
* 40bits input (T0SZ = 24)
* 64kB pages (TG0 = 1)
* 2 level page tables (SL = 1)
@@ -150,7 +152,6 @@
#else
/*
* Stage2 translation configuration:
- * 40bits output (PS = 2)
* 40bits input (T0SZ = 24)
* 4kB pages (TG0 = 0)
* 3 level page tables (SL = 1)
@@ -187,6 +188,7 @@
/* For compatibility with fault code shared with 32-bit */
#define FSC_FAULT ESR_ELx_FSC_FAULT
+#define FSC_ACCESS ESR_ELx_FSC_ACCESS
#define FSC_PERM ESR_ELx_FSC_PERM
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8ac3c70f..f0f58c9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -28,6 +28,8 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
#else
@@ -177,19 +179,10 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
/* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
- unsigned long end)
-{
- return 0;
-}
-
-static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return 0;
-}
-
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index 9f52beb..889c908 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -31,28 +31,6 @@ struct kvm_decode {
bool sign_extend;
};
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
- phys_addr_t phys_addr;
- u8 data[8];
- u32 len;
- bool is_write;
- void *private;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- run->mmio.phys_addr = mmio->phys_addr;
- run->mmio.len = mmio->len;
- run->mmio.is_write = mmio->is_write;
- memcpy(run->mmio.data, mmio->data, mmio->len);
- run->exit_reason = KVM_EXIT_MMIO;
-}
-
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6458b53..3625070 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -158,56 +158,21 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+
/*
* If we are concatenating first level stage-2 page tables, we would have less
* than or equal to 16 pointers in the fake PGD, because that's what the
- * architecture allows. In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS)
+ * architecture allows. In this case, (4 - CONFIG_PGTABLE_LEVELS)
* represents the first level for the host, and we add 1 to go to the next
* level (which uses concatenation) for the stage-2 tables.
*/
#if PTRS_PER_S2_PGD <= 16
-#define KVM_PREALLOC_LEVEL (4 - CONFIG_ARM64_PGTABLE_LEVELS + 1)
+#define KVM_PREALLOC_LEVEL (4 - CONFIG_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL (0)
#endif
-/**
- * kvm_prealloc_hwpgd - allocate inital table for VTTBR
- * @kvm: The KVM struct pointer for the VM.
- * @pgd: The kernel pseudo pgd
- *
- * When the kernel uses more levels of page tables than the guest, we allocate
- * a fake PGD and pre-populate it to point to the next-level page table, which
- * will be the real initial page table pointed to by the VTTBR.
- *
- * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
- * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we
- * allocate 2 consecutive PUD pages.
- */
-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
-{
- unsigned int i;
- unsigned long hwpgd;
-
- if (KVM_PREALLOC_LEVEL == 0)
- return 0;
-
- hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
- if (!hwpgd)
- return -ENOMEM;
-
- for (i = 0; i < PTRS_PER_S2_PGD; i++) {
- if (KVM_PREALLOC_LEVEL == 1)
- pgd_populate(NULL, pgd + i,
- (pud_t *)hwpgd + i * PTRS_PER_PUD);
- else if (KVM_PREALLOC_LEVEL == 2)
- pud_populate(NULL, pud_offset(pgd, 0) + i,
- (pmd_t *)hwpgd + i * PTRS_PER_PMD);
- }
-
- return 0;
-}
-
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
pgd_t *pgd = kvm->arch.pgd;
@@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm)
return pmd_offset(pud, 0);
}
-static inline void kvm_free_hwpgd(struct kvm *kvm)
+static inline unsigned int kvm_get_hwpgd_size(void)
{
- if (KVM_PREALLOC_LEVEL > 0) {
- unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
- free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
- }
+ if (KVM_PREALLOC_LEVEL > 0)
+ return PTRS_PER_S2_PGD * PAGE_SIZE;
+ return PTRS_PER_S2_PGD * sizeof(pgd_t);
}
static inline bool kvm_page_empty(void *ptr)
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a9eee33..101a42b 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
unsigned int cpu = smp_processor_id();
+ /*
+ * init_mm.pgd does not contain any user mappings and it is always
+ * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
+ */
+ if (next == &init_mm) {
+ cpu_set_reserved_ttbr0();
+ return;
+ }
+
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
check_and_switch_context(next, tsk);
}
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 22b1623..8fc8fa2 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -36,9 +36,9 @@
* for more information).
*/
#ifdef CONFIG_ARM64_64K_PAGES
-#define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS)
+#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
#else
-#define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS - 1)
+#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
#endif
#define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 09da25b..4fde8c1 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return ret;
}
+#define _percpu_read(pcp) \
+({ \
+ typeof(pcp) __retval; \
+ preempt_disable(); \
+ __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
+ sizeof(pcp)); \
+ preempt_enable(); \
+ __retval; \
+})
+
+#define _percpu_write(pcp, val) \
+do { \
+ preempt_disable(); \
+ __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \
+ sizeof(pcp)); \
+ preempt_enable(); \
+} while (0)
+
+#define _pcp_protect(operation, pcp, val) \
+({ \
+ typeof(pcp) __retval; \
+ preempt_disable(); \
+ __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
+ (val), sizeof(pcp)); \
+ preempt_enable(); \
+ __retval; \
+})
+
#define _percpu_add(pcp, val) \
- __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+ _pcp_protect(__percpu_add, pcp, val)
-#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
#define _percpu_and(pcp, val) \
- __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+ _pcp_protect(__percpu_and, pcp, val)
#define _percpu_or(pcp, val) \
- __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
-
-#define _percpu_read(pcp) (typeof(pcp)) \
- (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
-
-#define _percpu_write(pcp, val) \
- __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+ _pcp_protect(__percpu_or, pcp, val)
#define _percpu_xchg(pcp, val) (typeof(pcp)) \
- (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+ _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index e20df38..7642056 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -28,7 +28,7 @@
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
@@ -46,9 +46,9 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
}
-#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
@@ -66,7 +66,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
}
-#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5f930cc..80f3d24 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -21,7 +21,7 @@
/*
* PMD_SHIFT determines the size a level 2 page table entry can map.
*/
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3)
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
@@ -31,7 +31,7 @@
/*
* PUD_SHIFT determines the size a level 1 page table entry can map.
*/
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SHIFT ((PAGE_SHIFT - 3) * 3 + 3)
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
@@ -42,7 +42,7 @@
* PGDIR_SHIFT determines the size a top-level page table entry can map
* (depending on the configuration, this level can be 0, 1 or 2).
*/
-#define PGDIR_SHIFT ((PAGE_SHIFT - 3) * CONFIG_ARM64_PGTABLE_LEVELS + 3)
+#define PGDIR_SHIFT ((PAGE_SHIFT - 3) * CONFIG_PGTABLE_LEVELS + 3)
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
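To make the shift arithmetic concrete: with 4KB pages (PAGE_SHIFT = 12) each table level resolves PAGE_SHIFT - 3 = 9 bits of virtual address, so the formulas above give, for example:

        PMD_SHIFT   = (12 - 3) * 2 + 3 = 21     /* PMD_SIZE = 2MB */
        PUD_SHIFT   = (12 - 3) * 3 + 3 = 30     /* PUD_SIZE = 1GB */
        PGDIR_SHIFT = 30, PTRS_PER_PGD = 1 << (39 - 30) = 512
                        /* with CONFIG_PGTABLE_LEVELS = 3 and VA_BITS = 39 */

(these numbers are implied by the definitions, not stated in the patch).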
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index ca9df80..2b1bd7e 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -38,13 +38,13 @@ typedef struct { pteval_t pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )
#endif
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;
#define pud_val(x) ((x).pud)
#define __pud(x) ((pud_t) { (x) } )
@@ -64,13 +64,13 @@ typedef pteval_t pte_t;
#define pte_val(x) (x)
#define __pte(x) (x)
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
typedef pmdval_t pmd_t;
#define pmd_val(x) (x)
#define __pmd(x) (x)
#endif
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
typedef pudval_t pud_t;
#define pud_val(x) (x)
#define __pud(x) (x)
@@ -86,9 +86,9 @@ typedef pteval_t pgprot_t;
#endif /* STRICT_MM_TYPECHECKS */
-#if CONFIG_ARM64_PGTABLE_LEVELS == 2
+#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
-#elif CONFIG_ARM64_PGTABLE_LEVELS == 3
+#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 800ec0e..56283f8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -374,7 +374,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
*/
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
@@ -409,9 +409,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
-#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
#define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud))
@@ -445,7 +445,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
-#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 9a8fd84..941c375 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
#include <asm/memory.h>
-#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+#define cpu_switch_mm(pgd,mm) \
+do { \
+ BUG_ON(pgd == swapper_pg_dir); \
+ cpu_do_switch_mm(virt_to_phys(pgd),mm); \
+} while (0)
#define cpu_get_pgd() \
({ \
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 53d9c35..3a0242c 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -53,7 +53,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
tlb_remove_entry(tlb, pte);
}
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long addr)
{
@@ -62,7 +62,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
}
#endif
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
unsigned long addr)
{
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3ef77a4..c154c0b7 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -191,6 +191,9 @@ struct kvm_arch_memory_slot {
/* Highest supported SPI, from VGIC_NR_IRQS */
#define KVM_ARM_IRQ_GIC_MAX 127
+/* A single KVM irqchip, i.e. the VGIC */
+#define KVM_NR_IRQCHIPS 1
+
/* PSCI interface */
#define KVM_PSCI_FN_BASE 0x95c1ba5e
#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 5c08966..a78143a 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -15,7 +15,7 @@
#include <asm/cpuidle.h>
#include <asm/cpu_ops.h>
-int cpu_init_idle(unsigned int cpu)
+int arm_cpuidle_init(unsigned int cpu)
{
int ret = -EOPNOTSUPP;
struct device_node *cpu_node = of_cpu_device_node_get(cpu);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2b8d701..ab21e0d 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init);
static void efi_set_pgd(struct mm_struct *mm)
{
- cpu_switch_mm(mm->pgd, mm);
+ if (mm == &init_mm)
+ cpu_set_reserved_ttbr0();
+ else
+ cpu_switch_mm(mm->pgd, mm);
+
flush_tlb_all();
if (icache_is_aivivt())
__flush_icache_all();
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 98bbe06..e7d934d 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -527,7 +527,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* Disallow per-task kernel breakpoints since these would
* complicate the stepping code.
*/
- if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
+ if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
return -EINVAL;
return 0;
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 32aeea0..ec37ab3 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -200,7 +200,7 @@ up_fail:
void update_vsyscall(struct timekeeper *tk)
{
struct timespec xtime_coarse;
- u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter");
+ u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
++vdso_data->tb_seq_count;
smp_wmb();
@@ -213,11 +213,11 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
- vdso_data->cs_cycle_last = tk->tkr.cycle_last;
+ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
- vdso_data->cs_mult = tk->tkr.mult;
- vdso_data->cs_shift = tk->tkr.shift;
+ vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
+ vdso_data->cs_mult = tk->tkr_mono.mult;
+ vdso_data->cs_shift = tk->tkr_mono.shift;
}
smp_wmb();
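The .tkr to .tkr_mono renames track a timekeeping core change of this cycle in which the timekeeper's single read base was split per clock; schematically, with the field list abbreviated:

        struct timekeeper {
                struct tk_read_base     tkr_mono;       /* CLOCK_MONOTONIC base, was "tkr" */
                struct tk_read_base     tkr_raw;        /* CLOCK_MONOTONIC_RAW base */
                /* ... */
        };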
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f5590c8..5105e29 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
+ depends on OF
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select ANON_INODES
@@ -25,10 +26,10 @@ config KVM
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_ARM_HOST
- select KVM_ARM_VGIC
- select KVM_ARM_TIMER
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select SRCU
+ select HAVE_KVM_EVENTFD
+ select HAVE_KVM_IRQFD
---help---
Support hosting virtualized guest machines.
@@ -50,17 +51,4 @@ config KVM_ARM_MAX_VCPUS
large, so only choose a reasonable number that you expect to
actually use.
-config KVM_ARM_VGIC
- bool
- depends on KVM_ARM_HOST && OF
- select HAVE_KVM_IRQCHIP
- ---help---
- Adds support for a hardware assisted, in-kernel GIC emulation.
-
-config KVM_ARM_TIMER
- bool
- depends on KVM_ARM_VGIC
- ---help---
- Adds support for the Architected Timers in virtual machines.
-
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 4e6e09e..d5904f8 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -2,7 +2,7 @@
# Makefile for Kernel-based Virtual Machine module
#
-ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm
+ccflags-y += -Iarch/arm64/kvm
CFLAGS_arm.o := -I.
CFLAGS_mmu.o := -I.
@@ -11,7 +11,7 @@ ARM=../../../arch/arm/kvm
obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
@@ -19,11 +19,11 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
-kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 58e0c2b..ef7d112 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
}
early_param("coherent_pool", early_coherent_pool);
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
unsigned long val;
void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
*ret_page = phys_to_page(phys);
ptr = (void *)val;
+ if (flags & __GFP_ZERO)
+ memset(ptr, 0, size);
}
return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
flags |= GFP_DMA;
if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
struct page *page;
+ void *addr;
size = PAGE_ALIGN(size);
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
return NULL;
*dma_handle = phys_to_dma(dev, page_to_phys(page));
- return page_address(page);
+ addr = page_address(page);
+ if (flags & __GFP_ZERO)
+ memset(addr, 0, size);
+ return addr;
} else {
return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
if (!coherent && !(flags & __GFP_WAIT)) {
struct page *page = NULL;
- void *addr = __alloc_from_pool(size, &page);
+ void *addr = __alloc_from_pool(size, &page, flags);
if (addr)
*dma_handle = phys_to_dma(dev, page_to_phys(page));
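Both hunks close the same hole: the atomic-pool and CMA allocation paths handed memory back without honouring __GFP_ZERO, unlike the swiotlb fallback. The caller pattern that was silently broken, for illustration:

        void *buf = dma_alloc_coherent(dev, size, &dma_handle,
                                       GFP_ATOMIC | __GFP_ZERO);
        /* buf is expected to read back as zeroes before first use */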
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ae85da6..597831b 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -190,6 +190,8 @@ void __init bootmem_init(void)
min = PFN_UP(memblock_start_of_DRAM());
max = PFN_DOWN(memblock_end_of_DRAM());
+ early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);
+
/*
* Sparsemem tries to allocate bootmem in memory_present(), so must be
* done after the fixed reservations.
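(With this hook in place, booting with the generic memtest= parameter, e.g. memtest=4 to run four patterns, scans the [min, max) DRAM range before the sparsemem setup below; the parameter handling belongs to the generic early_memtest() code rather than to this hunk.)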
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 54922d1..ed17747 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -47,17 +47,16 @@ static int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
- unsigned long rnd = 0;
+ unsigned long rnd;
- if (current->flags & PF_RANDOMIZE)
- rnd = (long)get_random_int() & STACK_RND_MASK;
+ rnd = (unsigned long)get_random_int() & STACK_RND_MASK;
return rnd << PAGE_SHIFT;
}
-static unsigned long mmap_base(void)
+static unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
@@ -66,7 +65,7 @@ static unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
- return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd());
+ return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
/*
@@ -75,15 +74,20 @@ static unsigned long mmap_base(void)
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
/*
* Fall back to the standard layout if the personality bit is set, or
* if the expected stack growth is unlimited:
*/
if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
+ mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
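The net effect is that the random offset is computed once per exec and now applies to the legacy bottom-up layout too, which previously always started at a fixed TASK_UNMAPPED_BASE. Restating the flow above in outline:

        rnd  = (current->flags & PF_RANDOMIZE) ? arch_mmap_rnd() : 0;
        base = mmap_is_legacy() ? TASK_UNMAPPED_BASE + rnd      /* bottom-up */
                                : mmap_base(rnd);               /* top-down  */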
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c6daaf6..79e0116 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -550,10 +550,10 @@ void vmemmap_free(unsigned long start, unsigned long end)
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
-#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
index d232888..0388ece 100644
--- a/arch/avr32/include/asm/elf.h
+++ b/arch/avr32/include/asm/elf.h
@@ -84,7 +84,7 @@ typedef struct user_fpu_struct elf_fpregset_t;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* This yields a mask that user programs can use to figure out what
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 8ad3e90..1c72595 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -413,16 +413,14 @@ int __cpu_disable(void)
return 0;
}
-static DECLARE_COMPLETION(cpu_killed);
-
int __cpu_die(unsigned int cpu)
{
- return wait_for_completion_timeout(&cpu_killed, 5000);
+ return cpu_wait_death(cpu, 5);
}
void cpu_die(void)
{
- complete(&cpu_killed);
+ (void)cpu_report_death();
atomic_dec(&init_mm.mm_users);
atomic_dec(&init_mm.mm_count);
diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c
index 57d2ea8..3ae9f5a 100644
--- a/arch/c6x/kernel/process.c
+++ b/arch/c6x/kernel/process.c
@@ -101,7 +101,6 @@ void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp)
*/
usp -= 8;
- set_fs(USER_DS);
regs->pc = pc;
regs->sp = usp;
regs->tsr |= 0x40; /* set user mode */
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 336713a..85ca672 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -176,8 +176,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set)
struct sigframe __user *frame;
int rsig, sig = ksig->sig;
- set_fs(USER_DS);
-
frame = get_sigframe(ksig, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -257,8 +255,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set)
struct rt_sigframe __user *frame;
int rsig, sig = ksig->sig;
- set_fs(USER_DS);
-
frame = get_sigframe(ksig, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index b073f4d..f211839 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -316,6 +316,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
int __init pcibios_init(void)
{
+ struct pci_bus *bus;
struct pci_ops *dir = NULL;
LIST_HEAD(resources);
@@ -383,12 +384,15 @@ int __init pcibios_init(void)
printk("PCI: Probing PCI hardware\n");
pci_add_resource(&resources, &pci_ioport_resource);
pci_add_resource(&resources, &pci_iomem_resource);
- pci_scan_root_bus(NULL, 0, pci_root_ops, NULL, &resources);
+ bus = pci_scan_root_bus(NULL, 0, pci_root_ops, NULL, &resources);
pcibios_irq_init();
pcibios_fixup_irqs();
pcibios_resource_survey();
+ if (!bus)
+ return 0;
+ pci_bus_add_devices(bus);
return 0;
}
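This and the later PCI hunks (m68k ColdFire, ia64 SN, microblaze) converge on the same pattern now that scanning a root bus no longer implicitly adds the discovered devices; schematically, with error handling elided:

        bus = pci_scan_root_bus(NULL, 0, ops, sysdata, &resources);
        if (!bus)
                return 0;               /* nothing probed */
        /* ... IRQ fixups, resource survey ... */
        pci_bus_add_devices(bus);       /* make the devices visible to drivers */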
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 0a0dd5c..a9ebd47 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -37,8 +37,6 @@
*/
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
- /* Set to run with user-mode data segmentation */
- set_fs(USER_DS);
/* We want to zero all data-containing registers. Is this overkill? */
memset(regs, 0, sizeof(*regs));
/* We might want to also zero all Processor registers here */
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 074e52b..4f9a666 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -1,3 +1,8 @@
+config PGTABLE_LEVELS
+ int "Page Table Levels" if !IA64_PAGE_SIZE_64KB
+ range 3 4 if !IA64_PAGE_SIZE_64KB
+ default 3
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -286,19 +291,6 @@ config IA64_PAGE_SIZE_64KB
endchoice
-choice
- prompt "Page Table Levels"
- default PGTABLE_3
-
-config PGTABLE_3
- bool "3 Levels"
-
-config PGTABLE_4
- depends on !IA64_PAGE_SIZE_64KB
- bool "4 Levels"
-
-endchoice
-
if IA64_HP_SIM
config HZ
default 32
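With the PGTABLE_3/PGTABLE_4 choice folded into a numeric symbol, level checks become plain comparisons that generic code can share too; the conversions in the hunks below all have the shape:

        #if CONFIG_PGTABLE_LEVELS == 4          /* was: #ifdef CONFIG_PGTABLE_4 */
        /* pud-level handling */
        #endif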
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 1f1bf14..ec48bb9 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -173,7 +173,7 @@ get_order (unsigned long size)
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
typedef struct { unsigned long pud; } pud_t;
#endif
typedef struct { unsigned long pgd; } pgd_t;
@@ -182,7 +182,7 @@ get_order (unsigned long size)
# define pte_val(x) ((x).pte)
# define pmd_val(x) ((x).pmd)
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
# define pud_val(x) ((x).pud)
#endif
# define pgd_val(x) ((x).pgd)
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index 5767cdf..f5e70e9 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -32,7 +32,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
quicklist_free(0, NULL, pgd);
}
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
static inline void
pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
{
@@ -49,7 +49,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
quicklist_free(0, NULL, pud);
}
#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
-#endif /* CONFIG_PGTABLE_4 */
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 7b6f880..9f3ed9e 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -99,7 +99,7 @@
#define PMD_MASK (~(PMD_SIZE-1))
#define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT))
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
/*
* Definitions for second level:
*
@@ -117,7 +117,7 @@
*
* PGDIR_SHIFT determines what a first-level page table entry can map.
*/
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
#define PGDIR_SHIFT (PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
#else
#define PGDIR_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
@@ -180,7 +180,7 @@
#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
#define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
@@ -281,7 +281,7 @@ extern unsigned long VMALLOC_END;
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
#define pud_page(pud) virt_to_page((pud_val(pud) + PAGE_OFFSET))
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
#define pgd_none(pgd) (!pgd_val(pgd))
#define pgd_bad(pgd) (!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
@@ -384,7 +384,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
here. */
#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
/* Find an entry in the second-level page table.. */
#define pud_offset(dir,addr) \
((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
@@ -586,7 +586,7 @@ extern struct page *zero_page_memmap_ptr;
#define __HAVE_ARCH_PGD_OFFSET_GATE
-#ifndef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/pgtable.h>
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 18e794a..e42bf7a 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -146,7 +146,7 @@ ENTRY(vhpt_miss)
(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4]
cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
shr.u r28=r22,PUD_SHIFT // shift pud index into position
#else
shr.u r18=r22,PMD_SHIFT // shift pmd index into position
@@ -155,7 +155,7 @@ ENTRY(vhpt_miss)
ld8 r17=[r17] // get *pgd (may be 0)
;;
(p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL?
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
dep r28=r28,r17,3,(PAGE_SHIFT-3) // r28=pud_offset(pgd,addr)
;;
shr.u r18=r22,PMD_SHIFT // shift pmd index into position
@@ -222,13 +222,13 @@ ENTRY(vhpt_miss)
*/
ld8 r25=[r21] // read *pte again
ld8 r26=[r17] // read *pmd again
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
ld8 r19=[r28] // read *pud again
#endif
cmp.ne p6,p7=r0,r0
;;
cmp.ne.or.andcm p6,p7=r26,r20 // did *pmd change
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
cmp.ne.or.andcm p6,p7=r19,r29 // did *pud change
#endif
mov r27=PAGE_SHIFT<<2
@@ -476,7 +476,7 @@ ENTRY(nested_dtlb_miss)
(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4]
cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
shr.u r18=r22,PUD_SHIFT // shift pud index into position
#else
shr.u r18=r22,PMD_SHIFT // shift pmd index into position
@@ -487,7 +487,7 @@ ENTRY(nested_dtlb_miss)
(p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL?
dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=p[u|m]d_offset(pgd,addr)
;;
-#ifdef CONFIG_PGTABLE_4
+#if CONFIG_PGTABLE_LEVELS == 4
(p7) ld8 r17=[r17] // get *pud (may be 0)
shr.u r18=r22,PMD_SHIFT // shift pmd index into position
;;
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 5151a64..b72cd7a 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -156,9 +156,9 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_OFFSET(node_memblk_s, start_paddr);
VMCOREINFO_OFFSET(node_memblk_s, size);
#endif
-#ifdef CONFIG_PGTABLE_3
+#if CONFIG_PGTABLE_LEVELS == 3
VMCOREINFO_CONFIG(PGTABLE_3);
-#elif defined(CONFIG_PGTABLE_4)
+#elif CONFIG_PGTABLE_LEVELS == 4
VMCOREINFO_CONFIG(PGTABLE_4);
#endif
}
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 0b5ce82..1be65eb 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -271,7 +271,9 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
if (bus == NULL) {
kfree(res);
kfree(controller);
+ return;
}
+ pci_bus_add_devices(bus);
}
/*
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 7736c66..8c25e0c 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -214,8 +214,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
regs->r2 = (unsigned long)&frame->uc;
regs->bpc = (unsigned long)ksig->ka.sa.sa_handler;
- set_fs(USER_DS);
-
#if DEBUG_SIG
printk("SIG deliver (%s:%d): sp=%p pc=%p\n",
current->comm, current->pid, frame, regs->pc);
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 87b7c75..2dd8f63 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -67,6 +67,10 @@ config HZ
default 1000 if CLEOPATRA
default 100
+config PGTABLE_LEVELS
+ default 2 if SUN3 || COLDFIRE
+ default 3
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c
index df96792..821de92 100644
--- a/arch/m68k/coldfire/pci.c
+++ b/arch/m68k/coldfire/pci.c
@@ -313,12 +313,16 @@ static int __init mcf_pci_init(void)
schedule_timeout(msecs_to_jiffies(200));
rootbus = pci_scan_bus(0, &mcf_pci_ops, NULL);
+ if (!rootbus)
+ return -ENODEV;
+
rootbus->resource[0] = &mcf_pci_io;
rootbus->resource[1] = &mcf_pci_mem;
pci_fixup_irqs(pci_common_swizzle, mcf_pci_map_irq);
pci_bus_size_bridges(rootbus);
pci_bus_assign_resources(rootbus);
+ pci_bus_add_devices(rootbus);
return 0;
}
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 1a10a08..ed1643b 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -521,8 +521,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -573,5 +575,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 7859a73..d38822b 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -479,8 +479,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -531,5 +533,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 372593a..c429199 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -501,8 +501,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -553,5 +555,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index f3bd35e..9b88037 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -472,8 +472,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -524,5 +526,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 9f9793f..49ae337 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -481,8 +481,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -533,5 +535,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 89f225c..ee143a5 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -503,8 +503,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -555,5 +557,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index d3cdb54..c777aa0 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -583,8 +583,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -635,5 +637,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index b4c7664..a7628a8 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -472,8 +472,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -524,5 +526,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 0d4a26f..ebaa682 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -472,8 +472,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -524,5 +526,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 5d581c5..2c16853 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -340,7 +340,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
-CONFIG_NE2000=m
+CONFIG_NE2000=y
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
@@ -494,8 +494,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -546,5 +548,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index c6b49a4..e3056bf 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -473,8 +473,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -524,5 +526,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index b65785e..73c36b7 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -473,8 +473,10 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
@@ -525,5 +527,6 @@ CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
# CONFIG_CRYPTO_HW is not set
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/include/asm/mcfqspi.h b/arch/m68k/include/asm/mcfqspi.h
index 7b51416..256da0e 100644
--- a/arch/m68k/include/asm/mcfqspi.h
+++ b/arch/m68k/include/asm/mcfqspi.h
@@ -11,11 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef mcfqspi_h
diff --git a/arch/m68k/kernel/pcibios.c b/arch/m68k/kernel/pcibios.c
index 931a31f..8520250 100644
--- a/arch/m68k/kernel/pcibios.c
+++ b/arch/m68k/kernel/pcibios.c
@@ -62,7 +62,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
r = dev->resource + idx;
if (!r->start && r->end) {
- pr_err(KERN_ERR "PCI: Device %s not available because of resource collisions\n",
+ pr_err("PCI: Device %s not available because of resource collisions\n",
pci_name(dev));
return -EINVAL;
}
diff --git a/arch/m68k/lib/ashldi3.c b/arch/m68k/lib/ashldi3.c
index 7729f33..37234c2 100644
--- a/arch/m68k/lib/ashldi3.c
+++ b/arch/m68k/lib/ashldi3.c
@@ -11,12 +11,7 @@ any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+GNU General Public License for more details. */
#define BITS_PER_UNIT 8
diff --git a/arch/m68k/lib/ashrdi3.c b/arch/m68k/lib/ashrdi3.c
index 18ea5f7..1d59345 100644
--- a/arch/m68k/lib/ashrdi3.c
+++ b/arch/m68k/lib/ashrdi3.c
@@ -11,12 +11,7 @@ any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+GNU General Public License for more details. */
#define BITS_PER_UNIT 8
diff --git a/arch/m68k/lib/divsi3.S b/arch/m68k/lib/divsi3.S
index ec307b6..2c0ec85 100644
--- a/arch/m68k/lib/divsi3.S
+++ b/arch/m68k/lib/divsi3.S
@@ -19,12 +19,7 @@ distribution when not linked into another program.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+General Public License for more details. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
diff --git a/arch/m68k/lib/lshrdi3.c b/arch/m68k/lib/lshrdi3.c
index d06442d..49e1ec8 100644
--- a/arch/m68k/lib/lshrdi3.c
+++ b/arch/m68k/lib/lshrdi3.c
@@ -11,12 +11,7 @@ any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+GNU General Public License for more details. */
#define BITS_PER_UNIT 8
diff --git a/arch/m68k/lib/modsi3.S b/arch/m68k/lib/modsi3.S
index ef38494..1d9e0ef 100644
--- a/arch/m68k/lib/modsi3.S
+++ b/arch/m68k/lib/modsi3.S
@@ -19,12 +19,7 @@ distribution when not linked into another program.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+General Public License for more details. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index ee5f0b1..9006d15 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -12,12 +12,7 @@ any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+GNU General Public License for more details. */
#ifdef CONFIG_CPU_HAS_NO_MULDIV64
diff --git a/arch/m68k/lib/mulsi3.S b/arch/m68k/lib/mulsi3.S
index ce29ea3..c39ad4e 100644
--- a/arch/m68k/lib/mulsi3.S
+++ b/arch/m68k/lib/mulsi3.S
@@ -19,12 +19,7 @@ distribution when not linked into another program.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+General Public License for more details. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
diff --git a/arch/m68k/lib/udivsi3.S b/arch/m68k/lib/udivsi3.S
index c424c4a..35a5446 100644
--- a/arch/m68k/lib/udivsi3.S
+++ b/arch/m68k/lib/udivsi3.S
@@ -19,12 +19,7 @@ distribution when not linked into another program.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+General Public License for more details. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
diff --git a/arch/m68k/lib/umodsi3.S b/arch/m68k/lib/umodsi3.S
index 5def5f6..099da51 100644
--- a/arch/m68k/lib/umodsi3.S
+++ b/arch/m68k/lib/umodsi3.S
@@ -19,12 +19,7 @@ distribution when not linked into another program.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+General Public License for more details. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index 5403712..bb11dce 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -47,9 +47,8 @@ void __init oss_init(void)
/* Disable all interrupts. Unlike a VIA it looks like we */
/* do this by setting the source's interrupt level to zero. */
- for (i = 0; i <= OSS_NUM_SOURCES; i++) {
+ for (i = 0; i < OSS_NUM_SOURCES; i++)
oss->irq_level[i] = 0;
- }
}
/*
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h
index 9359e50..d5779b0 100644
--- a/arch/metag/include/asm/io.h
+++ b/arch/metag/include/asm/io.h
@@ -2,6 +2,7 @@
#define _ASM_METAG_IO_H
#include <linux/types.h>
+#include <asm/pgtable-bits.h>
#define IO_SPACE_LIMIT 0
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h
new file mode 100644
index 0000000..25ba672
--- /dev/null
+++ b/arch/metag/include/asm/pgtable-bits.h
@@ -0,0 +1,104 @@
+/*
+ * Meta page table definitions.
+ */
+
+#ifndef _METAG_PGTABLE_BITS_H
+#define _METAG_PGTABLE_BITS_H
+
+#include <asm/metag_mem.h>
+
+/*
+ * Definitions for MMU descriptors
+ *
+ * These are the hardware bits in the MMCU pte entries.
+ * Derived from the Meta toolkit headers.
+ */
+#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
+#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
+#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
+/* Write combine bit - this can cause writes to occur out of order */
+#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
+/* Sys coherent bit - this bit is never used by Linux */
+#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
+#define _PAGE_ALWAYS_ZERO_1 0x020
+#define _PAGE_CACHE_CTRL0 0x040
+#define _PAGE_CACHE_CTRL1 0x080
+#define _PAGE_ALWAYS_ZERO_2 0x100
+#define _PAGE_ALWAYS_ZERO_3 0x200
+#define _PAGE_ALWAYS_ZERO_4 0x400
+#define _PAGE_ALWAYS_ZERO_5 0x800
+
+/* These are software bits that we stuff into the gaps in the hardware
+ * pte entries that are not used. Note, these DO get stored in the actual
+ * hardware, but the hardware just does not use them.
+ */
+#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
+#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
+
+/* Pages owned, and protected by, the kernel. */
+#define _PAGE_KERNEL _PAGE_PRIV
+
+/* No cacheing of this page */
+#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
+/* burst cacheing - good for data streaming */
+#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
+/* One cache way per thread */
+#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
+/* Full on cacheing */
+#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
+
+#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
+
+/* which bits are used for cache control ... */
+#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
+ _PAGE_WR_COMBINE)
+
+/* This is a mask of the bits that pte_modify is allowed to change. */
+#define _PAGE_CHG_MASK (PAGE_MASK)
+
+#define _PAGE_SZ_SHIFT 1
+#define _PAGE_SZ_4K (0x0)
+#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
+
+#if defined(CONFIG_PAGE_SIZE_4K)
+#define _PAGE_SZ (_PAGE_SZ_4K)
+#elif defined(CONFIG_PAGE_SIZE_8K)
+#define _PAGE_SZ (_PAGE_SZ_8K)
+#elif defined(CONFIG_PAGE_SIZE_16K)
+#define _PAGE_SZ (_PAGE_SZ_16K)
+#endif
+#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
+# define _PAGE_SZHUGE (_PAGE_SZ_8K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
+# define _PAGE_SZHUGE (_PAGE_SZ_16K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
+# define _PAGE_SZHUGE (_PAGE_SZ_32K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+# define _PAGE_SZHUGE (_PAGE_SZ_64K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
+# define _PAGE_SZHUGE (_PAGE_SZ_128K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+# define _PAGE_SZHUGE (_PAGE_SZ_256K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+# define _PAGE_SZHUGE (_PAGE_SZ_512K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
+# define _PAGE_SZHUGE (_PAGE_SZ_1M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
+# define _PAGE_SZHUGE (_PAGE_SZ_2M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
+# define _PAGE_SZHUGE (_PAGE_SZ_4M)
+#endif
+
+#endif /* _METAG_PGTABLE_BITS_H */
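The point of the split shows in the <asm/io.h> hunk above: io.h only needs the _PAGE_* cache bits, and moving them into their own header avoids dragging all of <asm/pgtable.h> into every io.h user. A hypothetical beneficiary:

        #include <asm/io.h>     /* now sees _PAGE_CACHEABLE et al. via pgtable-bits.h */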
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index d0604c0..ffa3a3a 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -5,6 +5,7 @@
#ifndef _METAG_PGTABLE_H
#define _METAG_PGTABLE_H
+#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>
/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
@@ -21,100 +22,6 @@
#endif
/*
- * Definitions for MMU descriptors
- *
- * These are the hardware bits in the MMCU pte entries.
- * Derived from the Meta toolkit headers.
- */
-#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
-#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
-#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
-/* Write combine bit - this can cause writes to occur out of order */
-#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
-/* Sys coherent bit - this bit is never used by Linux */
-#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
-#define _PAGE_ALWAYS_ZERO_1 0x020
-#define _PAGE_CACHE_CTRL0 0x040
-#define _PAGE_CACHE_CTRL1 0x080
-#define _PAGE_ALWAYS_ZERO_2 0x100
-#define _PAGE_ALWAYS_ZERO_3 0x200
-#define _PAGE_ALWAYS_ZERO_4 0x400
-#define _PAGE_ALWAYS_ZERO_5 0x800
-
-/* These are software bits that we stuff into the gaps in the hardware
- * pte entries that are not used. Note, these DO get stored in the actual
- * hardware, but the hardware just does not use them.
- */
-#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
-#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
-
-/* Pages owned, and protected by, the kernel. */
-#define _PAGE_KERNEL _PAGE_PRIV
-
-/* No cacheing of this page */
-#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
-/* burst cacheing - good for data streaming */
-#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
-/* One cache way per thread */
-#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
-/* Full on cacheing */
-#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
-
-#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
-
-/* which bits are used for cache control ... */
-#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
- _PAGE_WR_COMBINE)
-
-/* This is a mask of the bits that pte_modify is allowed to change. */
-#define _PAGE_CHG_MASK (PAGE_MASK)
-
-#define _PAGE_SZ_SHIFT 1
-#define _PAGE_SZ_4K (0x0)
-#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
-
-#if defined(CONFIG_PAGE_SIZE_4K)
-#define _PAGE_SZ (_PAGE_SZ_4K)
-#elif defined(CONFIG_PAGE_SIZE_8K)
-#define _PAGE_SZ (_PAGE_SZ_8K)
-#elif defined(CONFIG_PAGE_SIZE_16K)
-#define _PAGE_SZ (_PAGE_SZ_16K)
-#endif
-#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
-# define _PAGE_SZHUGE (_PAGE_SZ_8K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
-# define _PAGE_SZHUGE (_PAGE_SZ_16K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
-# define _PAGE_SZHUGE (_PAGE_SZ_32K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-# define _PAGE_SZHUGE (_PAGE_SZ_64K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
-# define _PAGE_SZHUGE (_PAGE_SZ_128K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
-# define _PAGE_SZHUGE (_PAGE_SZ_256K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-# define _PAGE_SZHUGE (_PAGE_SZ_512K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
-# define _PAGE_SZHUGE (_PAGE_SZ_1M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
-# define _PAGE_SZHUGE (_PAGE_SZ_2M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
-# define _PAGE_SZHUGE (_PAGE_SZ_4M)
-#endif
-
-/*
* The Linux memory management assumes a three-level page table setup. On
* Meta, we use that, but "fold" the mid level into the top-level page
* table.
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index 13272fd..0838ca6 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -111,7 +111,6 @@ struct thread_struct {
*/
#define start_thread(regs, pc, usp) do { \
unsigned int *argc = (unsigned int *) bprm->exec; \
- set_fs(USER_DS); \
current->thread.int_depth = 1; \
/* Force this process down to user land */ \
regs->ctx.SaveMask = TBICTX_PRIV_BIT; \
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index f006d22..ac3a199 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -261,7 +261,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
}
#ifdef CONFIG_HOTPLUG_CPU
-static DECLARE_COMPLETION(cpu_killed);
/*
* __cpu_disable runs on the processor to be shutdown.
@@ -299,7 +298,7 @@ int __cpu_disable(void)
*/
void __cpu_die(unsigned int cpu)
{
- if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
+ if (!cpu_wait_death(cpu, 1))
pr_err("CPU%u: unable to kill\n", cpu);
}
@@ -314,7 +313,7 @@ void cpu_die(void)
local_irq_disable();
idle_task_exit();
- complete(&cpu_killed);
+ (void)cpu_report_death();
asm ("XOR TXENABLE, D0Re0,D0Re0\n");
}
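Together with the blackfin hunk earlier, this replaces a per-arch DECLARE_COMPLETION(cpu_killed) handshake with the generic CPU-hotplug death helpers; the shared shape is:

        /* on the dying CPU */
        (void)cpu_report_death();

        /* on a surviving CPU, timeout in seconds */
        if (!cpu_wait_death(cpu, 1))
                pr_err("CPU%u: unable to kill\n", cpu);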
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index a1cbaf9..20ccd4e 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -236,8 +236,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
/* Offset to handle microblaze rtid r14, 0 */
regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
- set_fs(USER_DS);
-
#ifdef DEBUG_SIG
pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
current->comm, current->pid, frame, regs->pc);
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 48528fb..ae838ed 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -1382,6 +1382,10 @@ static int __init pcibios_init(void)
/* Call common code to handle resource allocation */
pcibios_resource_survey();
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ if (hose->bus)
+ pci_bus_add_devices(hose->bus);
+ }
return 0;
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c7a1690..a326c4c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -23,7 +23,7 @@ config MIPS
select HAVE_KRETPROBES
select HAVE_DEBUG_KMEMLEAK
select HAVE_SYSCALL_TRACEPOINTS
- select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ select ARCH_HAS_ELF_RANDOMIZE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
@@ -2600,6 +2600,11 @@ config STACKTRACE_SUPPORT
bool
default y
+config PGTABLE_LEVELS
+ int
+ default 3 if 64BIT && !PAGE_SIZE_64KB
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index cdac7b3..8038647 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -16,38 +16,38 @@
.set push
SET_HARDFLOAT
cfc1 \tmp, fcr31
- swc1 $f0, THREAD_FPR0_LS64(\thread)
- swc1 $f1, THREAD_FPR1_LS64(\thread)
- swc1 $f2, THREAD_FPR2_LS64(\thread)
- swc1 $f3, THREAD_FPR3_LS64(\thread)
- swc1 $f4, THREAD_FPR4_LS64(\thread)
- swc1 $f5, THREAD_FPR5_LS64(\thread)
- swc1 $f6, THREAD_FPR6_LS64(\thread)
- swc1 $f7, THREAD_FPR7_LS64(\thread)
- swc1 $f8, THREAD_FPR8_LS64(\thread)
- swc1 $f9, THREAD_FPR9_LS64(\thread)
- swc1 $f10, THREAD_FPR10_LS64(\thread)
- swc1 $f11, THREAD_FPR11_LS64(\thread)
- swc1 $f12, THREAD_FPR12_LS64(\thread)
- swc1 $f13, THREAD_FPR13_LS64(\thread)
- swc1 $f14, THREAD_FPR14_LS64(\thread)
- swc1 $f15, THREAD_FPR15_LS64(\thread)
- swc1 $f16, THREAD_FPR16_LS64(\thread)
- swc1 $f17, THREAD_FPR17_LS64(\thread)
- swc1 $f18, THREAD_FPR18_LS64(\thread)
- swc1 $f19, THREAD_FPR19_LS64(\thread)
- swc1 $f20, THREAD_FPR20_LS64(\thread)
- swc1 $f21, THREAD_FPR21_LS64(\thread)
- swc1 $f22, THREAD_FPR22_LS64(\thread)
- swc1 $f23, THREAD_FPR23_LS64(\thread)
- swc1 $f24, THREAD_FPR24_LS64(\thread)
- swc1 $f25, THREAD_FPR25_LS64(\thread)
- swc1 $f26, THREAD_FPR26_LS64(\thread)
- swc1 $f27, THREAD_FPR27_LS64(\thread)
- swc1 $f28, THREAD_FPR28_LS64(\thread)
- swc1 $f29, THREAD_FPR29_LS64(\thread)
- swc1 $f30, THREAD_FPR30_LS64(\thread)
- swc1 $f31, THREAD_FPR31_LS64(\thread)
+ swc1 $f0, THREAD_FPR0(\thread)
+ swc1 $f1, THREAD_FPR1(\thread)
+ swc1 $f2, THREAD_FPR2(\thread)
+ swc1 $f3, THREAD_FPR3(\thread)
+ swc1 $f4, THREAD_FPR4(\thread)
+ swc1 $f5, THREAD_FPR5(\thread)
+ swc1 $f6, THREAD_FPR6(\thread)
+ swc1 $f7, THREAD_FPR7(\thread)
+ swc1 $f8, THREAD_FPR8(\thread)
+ swc1 $f9, THREAD_FPR9(\thread)
+ swc1 $f10, THREAD_FPR10(\thread)
+ swc1 $f11, THREAD_FPR11(\thread)
+ swc1 $f12, THREAD_FPR12(\thread)
+ swc1 $f13, THREAD_FPR13(\thread)
+ swc1 $f14, THREAD_FPR14(\thread)
+ swc1 $f15, THREAD_FPR15(\thread)
+ swc1 $f16, THREAD_FPR16(\thread)
+ swc1 $f17, THREAD_FPR17(\thread)
+ swc1 $f18, THREAD_FPR18(\thread)
+ swc1 $f19, THREAD_FPR19(\thread)
+ swc1 $f20, THREAD_FPR20(\thread)
+ swc1 $f21, THREAD_FPR21(\thread)
+ swc1 $f22, THREAD_FPR22(\thread)
+ swc1 $f23, THREAD_FPR23(\thread)
+ swc1 $f24, THREAD_FPR24(\thread)
+ swc1 $f25, THREAD_FPR25(\thread)
+ swc1 $f26, THREAD_FPR26(\thread)
+ swc1 $f27, THREAD_FPR27(\thread)
+ swc1 $f28, THREAD_FPR28(\thread)
+ swc1 $f29, THREAD_FPR29(\thread)
+ swc1 $f30, THREAD_FPR30(\thread)
+ swc1 $f31, THREAD_FPR31(\thread)
sw \tmp, THREAD_FCR31(\thread)
.set pop
.endm
@@ -56,38 +56,38 @@
.set push
SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
- lwc1 $f0, THREAD_FPR0_LS64(\thread)
- lwc1 $f1, THREAD_FPR1_LS64(\thread)
- lwc1 $f2, THREAD_FPR2_LS64(\thread)
- lwc1 $f3, THREAD_FPR3_LS64(\thread)
- lwc1 $f4, THREAD_FPR4_LS64(\thread)
- lwc1 $f5, THREAD_FPR5_LS64(\thread)
- lwc1 $f6, THREAD_FPR6_LS64(\thread)
- lwc1 $f7, THREAD_FPR7_LS64(\thread)
- lwc1 $f8, THREAD_FPR8_LS64(\thread)
- lwc1 $f9, THREAD_FPR9_LS64(\thread)
- lwc1 $f10, THREAD_FPR10_LS64(\thread)
- lwc1 $f11, THREAD_FPR11_LS64(\thread)
- lwc1 $f12, THREAD_FPR12_LS64(\thread)
- lwc1 $f13, THREAD_FPR13_LS64(\thread)
- lwc1 $f14, THREAD_FPR14_LS64(\thread)
- lwc1 $f15, THREAD_FPR15_LS64(\thread)
- lwc1 $f16, THREAD_FPR16_LS64(\thread)
- lwc1 $f17, THREAD_FPR17_LS64(\thread)
- lwc1 $f18, THREAD_FPR18_LS64(\thread)
- lwc1 $f19, THREAD_FPR19_LS64(\thread)
- lwc1 $f20, THREAD_FPR20_LS64(\thread)
- lwc1 $f21, THREAD_FPR21_LS64(\thread)
- lwc1 $f22, THREAD_FPR22_LS64(\thread)
- lwc1 $f23, THREAD_FPR23_LS64(\thread)
- lwc1 $f24, THREAD_FPR24_LS64(\thread)
- lwc1 $f25, THREAD_FPR25_LS64(\thread)
- lwc1 $f26, THREAD_FPR26_LS64(\thread)
- lwc1 $f27, THREAD_FPR27_LS64(\thread)
- lwc1 $f28, THREAD_FPR28_LS64(\thread)
- lwc1 $f29, THREAD_FPR29_LS64(\thread)
- lwc1 $f30, THREAD_FPR30_LS64(\thread)
- lwc1 $f31, THREAD_FPR31_LS64(\thread)
+ lwc1 $f0, THREAD_FPR0(\thread)
+ lwc1 $f1, THREAD_FPR1(\thread)
+ lwc1 $f2, THREAD_FPR2(\thread)
+ lwc1 $f3, THREAD_FPR3(\thread)
+ lwc1 $f4, THREAD_FPR4(\thread)
+ lwc1 $f5, THREAD_FPR5(\thread)
+ lwc1 $f6, THREAD_FPR6(\thread)
+ lwc1 $f7, THREAD_FPR7(\thread)
+ lwc1 $f8, THREAD_FPR8(\thread)
+ lwc1 $f9, THREAD_FPR9(\thread)
+ lwc1 $f10, THREAD_FPR10(\thread)
+ lwc1 $f11, THREAD_FPR11(\thread)
+ lwc1 $f12, THREAD_FPR12(\thread)
+ lwc1 $f13, THREAD_FPR13(\thread)
+ lwc1 $f14, THREAD_FPR14(\thread)
+ lwc1 $f15, THREAD_FPR15(\thread)
+ lwc1 $f16, THREAD_FPR16(\thread)
+ lwc1 $f17, THREAD_FPR17(\thread)
+ lwc1 $f18, THREAD_FPR18(\thread)
+ lwc1 $f19, THREAD_FPR19(\thread)
+ lwc1 $f20, THREAD_FPR20(\thread)
+ lwc1 $f21, THREAD_FPR21(\thread)
+ lwc1 $f22, THREAD_FPR22(\thread)
+ lwc1 $f23, THREAD_FPR23(\thread)
+ lwc1 $f24, THREAD_FPR24(\thread)
+ lwc1 $f25, THREAD_FPR25(\thread)
+ lwc1 $f26, THREAD_FPR26(\thread)
+ lwc1 $f27, THREAD_FPR27(\thread)
+ lwc1 $f28, THREAD_FPR28(\thread)
+ lwc1 $f29, THREAD_FPR29(\thread)
+ lwc1 $f30, THREAD_FPR30(\thread)
+ lwc1 $f31, THREAD_FPR31(\thread)
ctc1 \tmp, fcr31
.set pop
.endm
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 0cae459..6156ac8 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -60,22 +60,22 @@
.set push
SET_HARDFLOAT
cfc1 \tmp, fcr31
- sdc1 $f0, THREAD_FPR0_LS64(\thread)
- sdc1 $f2, THREAD_FPR2_LS64(\thread)
- sdc1 $f4, THREAD_FPR4_LS64(\thread)
- sdc1 $f6, THREAD_FPR6_LS64(\thread)
- sdc1 $f8, THREAD_FPR8_LS64(\thread)
- sdc1 $f10, THREAD_FPR10_LS64(\thread)
- sdc1 $f12, THREAD_FPR12_LS64(\thread)
- sdc1 $f14, THREAD_FPR14_LS64(\thread)
- sdc1 $f16, THREAD_FPR16_LS64(\thread)
- sdc1 $f18, THREAD_FPR18_LS64(\thread)
- sdc1 $f20, THREAD_FPR20_LS64(\thread)
- sdc1 $f22, THREAD_FPR22_LS64(\thread)
- sdc1 $f24, THREAD_FPR24_LS64(\thread)
- sdc1 $f26, THREAD_FPR26_LS64(\thread)
- sdc1 $f28, THREAD_FPR28_LS64(\thread)
- sdc1 $f30, THREAD_FPR30_LS64(\thread)
+ sdc1 $f0, THREAD_FPR0(\thread)
+ sdc1 $f2, THREAD_FPR2(\thread)
+ sdc1 $f4, THREAD_FPR4(\thread)
+ sdc1 $f6, THREAD_FPR6(\thread)
+ sdc1 $f8, THREAD_FPR8(\thread)
+ sdc1 $f10, THREAD_FPR10(\thread)
+ sdc1 $f12, THREAD_FPR12(\thread)
+ sdc1 $f14, THREAD_FPR14(\thread)
+ sdc1 $f16, THREAD_FPR16(\thread)
+ sdc1 $f18, THREAD_FPR18(\thread)
+ sdc1 $f20, THREAD_FPR20(\thread)
+ sdc1 $f22, THREAD_FPR22(\thread)
+ sdc1 $f24, THREAD_FPR24(\thread)
+ sdc1 $f26, THREAD_FPR26(\thread)
+ sdc1 $f28, THREAD_FPR28(\thread)
+ sdc1 $f30, THREAD_FPR30(\thread)
sw \tmp, THREAD_FCR31(\thread)
.set pop
.endm
@@ -84,22 +84,22 @@
.set push
.set mips64r2
SET_HARDFLOAT
- sdc1 $f1, THREAD_FPR1_LS64(\thread)
- sdc1 $f3, THREAD_FPR3_LS64(\thread)
- sdc1 $f5, THREAD_FPR5_LS64(\thread)
- sdc1 $f7, THREAD_FPR7_LS64(\thread)
- sdc1 $f9, THREAD_FPR9_LS64(\thread)
- sdc1 $f11, THREAD_FPR11_LS64(\thread)
- sdc1 $f13, THREAD_FPR13_LS64(\thread)
- sdc1 $f15, THREAD_FPR15_LS64(\thread)
- sdc1 $f17, THREAD_FPR17_LS64(\thread)
- sdc1 $f19, THREAD_FPR19_LS64(\thread)
- sdc1 $f21, THREAD_FPR21_LS64(\thread)
- sdc1 $f23, THREAD_FPR23_LS64(\thread)
- sdc1 $f25, THREAD_FPR25_LS64(\thread)
- sdc1 $f27, THREAD_FPR27_LS64(\thread)
- sdc1 $f29, THREAD_FPR29_LS64(\thread)
- sdc1 $f31, THREAD_FPR31_LS64(\thread)
+ sdc1 $f1, THREAD_FPR1(\thread)
+ sdc1 $f3, THREAD_FPR3(\thread)
+ sdc1 $f5, THREAD_FPR5(\thread)
+ sdc1 $f7, THREAD_FPR7(\thread)
+ sdc1 $f9, THREAD_FPR9(\thread)
+ sdc1 $f11, THREAD_FPR11(\thread)
+ sdc1 $f13, THREAD_FPR13(\thread)
+ sdc1 $f15, THREAD_FPR15(\thread)
+ sdc1 $f17, THREAD_FPR17(\thread)
+ sdc1 $f19, THREAD_FPR19(\thread)
+ sdc1 $f21, THREAD_FPR21(\thread)
+ sdc1 $f23, THREAD_FPR23(\thread)
+ sdc1 $f25, THREAD_FPR25(\thread)
+ sdc1 $f27, THREAD_FPR27(\thread)
+ sdc1 $f29, THREAD_FPR29(\thread)
+ sdc1 $f31, THREAD_FPR31(\thread)
.set pop
.endm
@@ -118,22 +118,22 @@
.set push
SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
- ldc1 $f0, THREAD_FPR0_LS64(\thread)
- ldc1 $f2, THREAD_FPR2_LS64(\thread)
- ldc1 $f4, THREAD_FPR4_LS64(\thread)
- ldc1 $f6, THREAD_FPR6_LS64(\thread)
- ldc1 $f8, THREAD_FPR8_LS64(\thread)
- ldc1 $f10, THREAD_FPR10_LS64(\thread)
- ldc1 $f12, THREAD_FPR12_LS64(\thread)
- ldc1 $f14, THREAD_FPR14_LS64(\thread)
- ldc1 $f16, THREAD_FPR16_LS64(\thread)
- ldc1 $f18, THREAD_FPR18_LS64(\thread)
- ldc1 $f20, THREAD_FPR20_LS64(\thread)
- ldc1 $f22, THREAD_FPR22_LS64(\thread)
- ldc1 $f24, THREAD_FPR24_LS64(\thread)
- ldc1 $f26, THREAD_FPR26_LS64(\thread)
- ldc1 $f28, THREAD_FPR28_LS64(\thread)
- ldc1 $f30, THREAD_FPR30_LS64(\thread)
+ ldc1 $f0, THREAD_FPR0(\thread)
+ ldc1 $f2, THREAD_FPR2(\thread)
+ ldc1 $f4, THREAD_FPR4(\thread)
+ ldc1 $f6, THREAD_FPR6(\thread)
+ ldc1 $f8, THREAD_FPR8(\thread)
+ ldc1 $f10, THREAD_FPR10(\thread)
+ ldc1 $f12, THREAD_FPR12(\thread)
+ ldc1 $f14, THREAD_FPR14(\thread)
+ ldc1 $f16, THREAD_FPR16(\thread)
+ ldc1 $f18, THREAD_FPR18(\thread)
+ ldc1 $f20, THREAD_FPR20(\thread)
+ ldc1 $f22, THREAD_FPR22(\thread)
+ ldc1 $f24, THREAD_FPR24(\thread)
+ ldc1 $f26, THREAD_FPR26(\thread)
+ ldc1 $f28, THREAD_FPR28(\thread)
+ ldc1 $f30, THREAD_FPR30(\thread)
ctc1 \tmp, fcr31
.endm
@@ -141,22 +141,22 @@
.set push
.set mips64r2
SET_HARDFLOAT
- ldc1 $f1, THREAD_FPR1_LS64(\thread)
- ldc1 $f3, THREAD_FPR3_LS64(\thread)
- ldc1 $f5, THREAD_FPR5_LS64(\thread)
- ldc1 $f7, THREAD_FPR7_LS64(\thread)
- ldc1 $f9, THREAD_FPR9_LS64(\thread)
- ldc1 $f11, THREAD_FPR11_LS64(\thread)
- ldc1 $f13, THREAD_FPR13_LS64(\thread)
- ldc1 $f15, THREAD_FPR15_LS64(\thread)
- ldc1 $f17, THREAD_FPR17_LS64(\thread)
- ldc1 $f19, THREAD_FPR19_LS64(\thread)
- ldc1 $f21, THREAD_FPR21_LS64(\thread)
- ldc1 $f23, THREAD_FPR23_LS64(\thread)
- ldc1 $f25, THREAD_FPR25_LS64(\thread)
- ldc1 $f27, THREAD_FPR27_LS64(\thread)
- ldc1 $f29, THREAD_FPR29_LS64(\thread)
- ldc1 $f31, THREAD_FPR31_LS64(\thread)
+ ldc1 $f1, THREAD_FPR1(\thread)
+ ldc1 $f3, THREAD_FPR3(\thread)
+ ldc1 $f5, THREAD_FPR5(\thread)
+ ldc1 $f7, THREAD_FPR7(\thread)
+ ldc1 $f9, THREAD_FPR9(\thread)
+ ldc1 $f11, THREAD_FPR11(\thread)
+ ldc1 $f13, THREAD_FPR13(\thread)
+ ldc1 $f15, THREAD_FPR15(\thread)
+ ldc1 $f17, THREAD_FPR17(\thread)
+ ldc1 $f19, THREAD_FPR19(\thread)
+ ldc1 $f21, THREAD_FPR21(\thread)
+ ldc1 $f23, THREAD_FPR23(\thread)
+ ldc1 $f25, THREAD_FPR25(\thread)
+ ldc1 $f27, THREAD_FPR27(\thread)
+ ldc1 $f29, THREAD_FPR29(\thread)
+ ldc1 $f31, THREAD_FPR31(\thread)
.set pop
.endm
@@ -211,6 +211,22 @@
.endm
#ifdef TOOLCHAIN_SUPPORTS_MSA
+ .macro _cfcmsa rd, cs
+ .set push
+ .set mips32r2
+ .set msa
+ cfcmsa \rd, $\cs
+ .set pop
+ .endm
+
+ .macro _ctcmsa cd, rs
+ .set push
+ .set mips32r2
+ .set msa
+ ctcmsa $\cd, \rs
+ .set pop
+ .endm
+
.macro ld_d wd, off, base
.set push
.set mips32r2
@@ -227,35 +243,35 @@
.set pop
.endm
- .macro copy_u_w rd, ws, n
+ .macro copy_u_w ws, n
.set push
.set mips32r2
.set msa
- copy_u.w \rd, $w\ws[\n]
+ copy_u.w $1, $w\ws[\n]
.set pop
.endm
- .macro copy_u_d rd, ws, n
+ .macro copy_u_d ws, n
.set push
.set mips64r2
.set msa
- copy_u.d \rd, $w\ws[\n]
+ copy_u.d $1, $w\ws[\n]
.set pop
.endm
- .macro insert_w wd, n, rs
+ .macro insert_w wd, n
.set push
.set mips32r2
.set msa
- insert.w $w\wd[\n], \rs
+ insert.w $w\wd[\n], $1
.set pop
.endm
- .macro insert_d wd, n, rs
+ .macro insert_d wd, n
.set push
.set mips64r2
.set msa
- insert.d $w\wd[\n], \rs
+ insert.d $w\wd[\n], $1
.set pop
.endm
#else
@@ -283,7 +299,7 @@
/*
* Temporary until all toolchains in use include MSA support.
*/
- .macro cfcmsa rd, cs
+ .macro _cfcmsa rd, cs
.set push
.set noat
SET_HARDFLOAT
@@ -293,7 +309,7 @@
.set pop
.endm
- .macro ctcmsa cd, rs
+ .macro _ctcmsa cd, rs
.set push
.set noat
SET_HARDFLOAT
@@ -320,44 +336,36 @@
.set pop
.endm
- .macro copy_u_w rd, ws, n
+ .macro copy_u_w ws, n
.set push
.set noat
SET_HARDFLOAT
.insn
.word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
- /* move triggers an assembler bug... */
- or \rd, $1, zero
.set pop
.endm
- .macro copy_u_d rd, ws, n
+ .macro copy_u_d ws, n
.set push
.set noat
SET_HARDFLOAT
.insn
.word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
- /* move triggers an assembler bug... */
- or \rd, $1, zero
.set pop
.endm
- .macro insert_w wd, n, rs
+ .macro insert_w wd, n
.set push
.set noat
SET_HARDFLOAT
- /* move triggers an assembler bug... */
- or $1, \rs, zero
.word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm
- .macro insert_d wd, n, rs
+ .macro insert_d wd, n
.set push
.set noat
SET_HARDFLOAT
- /* move triggers an assembler bug... */
- or $1, \rs, zero
.word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm
@@ -399,7 +407,7 @@
.set push
.set noat
SET_HARDFLOAT
- cfcmsa $1, MSA_CSR
+ _cfcmsa $1, MSA_CSR
sw $1, THREAD_MSA_CSR(\thread)
.set pop
.endm
@@ -409,7 +417,7 @@
.set noat
SET_HARDFLOAT
lw $1, THREAD_MSA_CSR(\thread)
- ctcmsa MSA_CSR, $1
+ _ctcmsa MSA_CSR, $1
.set pop
ld_d 0, THREAD_FPR0, \thread
ld_d 1, THREAD_FPR1, \thread
@@ -452,9 +460,6 @@
insert_w \wd, 2
insert_w \wd, 3
#endif
- .if 31-\wd
- msa_init_upper (\wd+1)
- .endif
.endm
.macro msa_init_all_upper
@@ -463,6 +468,37 @@
SET_HARDFLOAT
not $1, zero
msa_init_upper 0
+ msa_init_upper 1
+ msa_init_upper 2
+ msa_init_upper 3
+ msa_init_upper 4
+ msa_init_upper 5
+ msa_init_upper 6
+ msa_init_upper 7
+ msa_init_upper 8
+ msa_init_upper 9
+ msa_init_upper 10
+ msa_init_upper 11
+ msa_init_upper 12
+ msa_init_upper 13
+ msa_init_upper 14
+ msa_init_upper 15
+ msa_init_upper 16
+ msa_init_upper 17
+ msa_init_upper 18
+ msa_init_upper 19
+ msa_init_upper 20
+ msa_init_upper 21
+ msa_init_upper 22
+ msa_init_upper 23
+ msa_init_upper 24
+ msa_init_upper 25
+ msa_init_upper 26
+ msa_init_upper 27
+ msa_init_upper 28
+ msa_init_upper 29
+ msa_init_upper 30
+ msa_init_upper 31
.set pop
.endm
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 535f196..31d747d 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -410,10 +410,6 @@ struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
struct arch_elf_state {
int fp_abi;
int interp_fp_abi;
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index dd083e9..b104ad9 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -48,6 +48,12 @@ enum fpu_mode {
#define FPU_FR_MASK 0x1
};
+#define __disable_fpu() \
+do { \
+ clear_c0_status(ST0_CU1); \
+ disable_fpu_hazard(); \
+} while (0)
+
static inline int __enable_fpu(enum fpu_mode mode)
{
int fr;
@@ -86,7 +92,12 @@ fr_common:
enable_fpu_hazard();
/* check FR has the desired value */
- return (!!(read_c0_status() & ST0_FR) == !!fr) ? 0 : SIGFPE;
+ if (!!(read_c0_status() & ST0_FR) == !!fr)
+ return 0;
+
+ /* unsupported FR value */
+ __disable_fpu();
+ return SIGFPE;
default:
BUG();
@@ -95,12 +106,6 @@ fr_common:
return SIGFPE;
}
-#define __disable_fpu() \
-do { \
- clear_c0_status(ST0_CU1); \
- disable_fpu_hazard(); \
-} while (0)
-
#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)
static inline int __is_fpu_owner(void)
@@ -170,6 +175,7 @@ static inline void lose_fpu(int save)
}
disable_msa();
clear_thread_flag(TIF_USEDMSA);
+ __disable_fpu();
} else if (is_fpu_owner()) {
if (save)
_save_fp(current);
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index fdbff44..608aa57 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -8,9 +8,9 @@
#ifndef _ASM_MIPS_JUMP_LABEL_H
#define _ASM_MIPS_JUMP_LABEL_H
-#include <linux/types.h>
+#ifndef __ASSEMBLY__
-#ifdef __KERNEL__
+#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 4
@@ -39,8 +39,6 @@ l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
#ifdef CONFIG_64BIT
typedef u64 jump_label_t;
#else
@@ -53,4 +51,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_MIPS_JUMP_LABEL_H */
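
The reshuffle above makes the header safe to include from assembly: only JUMP_LABEL_NOP_SIZE remains visible outside the #ifndef __ASSEMBLY__ guard. For orientation, a hedged sketch of the C-side API of this era (static_key_false()/static_key_slow_inc(); my_key and slow_feature() are illustrative names, not from the patch):

    #include <linux/jump_label.h>

    static struct static_key my_key = STATIC_KEY_INIT_FALSE;

    static void slow_feature(void)
    {
    	/* rarely-enabled work */
    }

    void hot_path(void)
    {
    	/* Compiled to a JUMP_LABEL_NOP_SIZE-byte NOP until the key is
    	 * enabled, then patched into a branch to the slow path. */
    	if (static_key_false(&my_key))
    		slow_feature();
    }

    /* enable later, e.g. from a sysctl: static_key_slow_inc(&my_key); */
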
diff --git a/arch/mips/include/asm/kdebug.h b/arch/mips/include/asm/kdebug.h
index 6a9af5f..cba22ab 100644
--- a/arch/mips/include/asm/kdebug.h
+++ b/arch/mips/include/asm/kdebug.h
@@ -10,7 +10,8 @@ enum die_val {
DIE_RI,
DIE_PAGE_FAULT,
DIE_BREAK,
- DIE_SSTEPBP
+ DIE_SSTEPBP,
+ DIE_MSAFP
};
#endif /* _ASM_MIPS_KDEBUG_H */
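
DIE_MSAFP gives in-kernel consumers (such as the KVM code later in this series) a hook to claim MSA FP exceptions on the die notifier chain before a signal is raised. A hedged consumer sketch; my_die_handler/my_die_nb are illustrative names, register_die_notifier() is the stock registration call:

    #include <linux/kdebug.h>
    #include <linux/notifier.h>

    static int my_die_handler(struct notifier_block *nb, unsigned long action,
    			      void *data)
    {
    	if (action == DIE_MSAFP)
    		return NOTIFY_STOP;	/* handled; skip default handling */
    	return NOTIFY_DONE;		/* not ours; keep walking the chain */
    }

    static struct notifier_block my_die_nb = {
    	.notifier_call = my_die_handler,
    };

    /* at init time: register_die_notifier(&my_die_nb); */
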
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index ac4fc71..4c25823 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -21,10 +21,10 @@
/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
- (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
#define MIPS_CP0_64(_R, _S) \
- (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
@@ -42,11 +42,14 @@
#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
+#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
+#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
@@ -119,6 +122,10 @@ struct kvm_vcpu_stat {
u32 syscall_exits;
u32 resvd_inst_exits;
u32 break_inst_exits;
+ u32 trap_inst_exits;
+ u32 msa_fpe_exits;
+ u32 fpe_exits;
+ u32 msa_disabled_exits;
u32 flush_dcache_exits;
u32 halt_successful_poll;
u32 halt_wakeup;
@@ -138,6 +145,10 @@ enum kvm_mips_exit_types {
SYSCALL_EXITS,
RESVD_INST_EXITS,
BREAK_INST_EXITS,
+ TRAP_INST_EXITS,
+ MSA_FPE_EXITS,
+ FPE_EXITS,
+ MSA_DISABLED_EXITS,
FLUSH_DCACHE_EXITS,
MAX_KVM_MIPS_EXIT_TYPES
};
@@ -206,6 +217,8 @@ struct mips_coproc {
#define MIPS_CP0_CONFIG1_SEL 1
#define MIPS_CP0_CONFIG2_SEL 2
#define MIPS_CP0_CONFIG3_SEL 3
+#define MIPS_CP0_CONFIG4_SEL 4
+#define MIPS_CP0_CONFIG5_SEL 5
/* Config0 register bits */
#define CP0C0_M 31
@@ -262,31 +275,6 @@ struct mips_coproc {
#define CP0C3_SM 1
#define CP0C3_TL 0
-/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
-#define MIPS_CONFIG0 \
- ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
-
-/* Have config2, no coprocessor2 attached, no MDMX support attached,
- no performance counters, watch registers present,
- no code compression, EJTAG present, no FPU, no watch registers */
-#define MIPS_CONFIG1 \
-((1 << CP0C1_M) | \
- (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
- (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
- (0 << CP0C1_FP))
-
-/* Have config3, no tertiary/secondary caches implemented */
-#define MIPS_CONFIG2 \
-((1 << CP0C2_M))
-
-/* No config4, no DSP ASE, no large physaddr (PABITS),
- no external interrupt controller, no vectored interrupts,
- no 1kb pages, no SmartMIPS ASE, no trace logic */
-#define MIPS_CONFIG3 \
-((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
- (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
- (0 << CP0C3_SM) | (0 << CP0C3_TL))
-
/* MMU types, the first four entries have the same layout as the
CP0C0_MT field. */
enum mips_mmu_types {
@@ -321,7 +309,9 @@ enum mips_mmu_types {
*/
#define T_TRAP 13 /* Trap instruction */
#define T_VCEI 14 /* Virtual coherency exception */
+#define T_MSAFPE 14 /* MSA floating point exception */
#define T_FPE 15 /* Floating point exception */
+#define T_MSADIS 21 /* MSA disabled exception */
#define T_WATCH 23 /* Watch address reference */
#define T_VCED 31 /* Virtual coherency data */
@@ -374,6 +364,9 @@ struct kvm_mips_tlb {
long tlb_lo1;
};
+#define KVM_MIPS_FPU_FPU 0x1
+#define KVM_MIPS_FPU_MSA 0x2
+
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase;
@@ -395,6 +388,8 @@ struct kvm_vcpu_arch {
/* FPU State */
struct mips_fpu_struct fpu;
+ /* Which FPU state is loaded (KVM_MIPS_FPU_*) */
+ unsigned int fpu_inuse;
/* COP0 State */
struct mips_coproc *cop0;
@@ -441,6 +436,9 @@ struct kvm_vcpu_arch {
/* WAIT executed */
int wait;
+
+ u8 fpu_enabled;
+ u8 msa_enabled;
};
@@ -482,11 +480,15 @@ struct kvm_vcpu_arch {
#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4])
+#define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5])
#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
+#define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
@@ -567,6 +569,31 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
}
+/* Helpers */
+
+static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
+{
+ return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
+ vcpu->fpu_enabled;
+}
+
+static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
+{
+ return kvm_mips_guest_can_have_fpu(vcpu) &&
+ kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
+}
+
+static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
+{
+ return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
+ vcpu->msa_enabled;
+}
+
+static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
+{
+ return kvm_mips_guest_can_have_msa(vcpu) &&
+ kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
+}
struct kvm_mips_callbacks {
int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
@@ -578,6 +605,10 @@ struct kvm_mips_callbacks {
int (*handle_syscall)(struct kvm_vcpu *vcpu);
int (*handle_res_inst)(struct kvm_vcpu *vcpu);
int (*handle_break)(struct kvm_vcpu *vcpu);
+ int (*handle_trap)(struct kvm_vcpu *vcpu);
+ int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
+ int (*handle_fpe)(struct kvm_vcpu *vcpu);
+ int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
int (*vm_init)(struct kvm *kvm);
int (*vcpu_init)(struct kvm_vcpu *vcpu);
int (*vcpu_setup)(struct kvm_vcpu *vcpu);
@@ -596,6 +627,8 @@ struct kvm_mips_callbacks {
const struct kvm_one_reg *reg, s64 *v);
int (*set_one_reg)(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg, s64 v);
+ int (*vcpu_get_regs)(struct kvm_vcpu *vcpu);
+ int (*vcpu_set_regs)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -606,6 +639,19 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* Trampoline ASM routine to start running in "Guest" context */
extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+/* FPU/MSA context management */
+void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
+void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
+void kvm_own_fpu(struct kvm_vcpu *vcpu);
+void kvm_own_msa(struct kvm_vcpu *vcpu);
+void kvm_drop_fpu(struct kvm_vcpu *vcpu);
+void kvm_lose_fpu(struct kvm_vcpu *vcpu);
+
/* TLB handling */
uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
@@ -711,6 +757,26 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
+extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run);
@@ -749,6 +815,11 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
+
/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
struct kvm_vcpu *vcpu);
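
The guest_can_have_* helpers earlier in this header use a guard of the form (!__builtin_constant_p(flag) || flag): if the cpu_has_* macro folds to a compile-time 0, the helper is constant-false and dependent code can be discarded entirely, while a runtime-probed flag defers the decision to the per-VCPU enable bit. A standalone demonstration of the idiom (FEATURE_POSSIBLE is an illustrative name):

    #include <stdio.h>

    /* Constant-0 flag: the expression folds to 0 at compile time.
     * Runtime flag: only `enabled` is actually tested. */
    #define FEATURE_POSSIBLE(flag, enabled) \
    	((!__builtin_constant_p(flag) || (flag)) && (enabled))

    int main(void)
    {
    	int probed_flag = 1;	/* stands in for a runtime CPU probe */

    	printf("%d\n", FEATURE_POSSIBLE(0, 1));			/* 0 */
    	printf("%d\n", FEATURE_POSSIBLE(probed_flag, 1));	/* 1 */
    	return 0;
    }
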
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index b5dcbee..9b3b48e 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -105,7 +105,7 @@ union fpureg {
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# define FPR_IDX(width, idx) (idx)
#else
-# define FPR_IDX(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx))
+# define FPR_IDX(width, idx) ((idx) ^ ((64 / (width)) - 1))
#endif
#define BUILD_FPR_ACCESS(width) \
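
On big-endian the old FPR_IDX mirrored an index across the whole FPU_REG_WIDTH-bit register, whereas the new form mirrors only within each 64-bit doubleword, so a 64-bit access always lands on val64[0] and a 32-bit access picks the correct word inside it. A compilable comparison, assuming FPU_REG_WIDTH is 128 (MSA vector registers):

    #include <stdio.h>

    #define FPU_REG_WIDTH 128

    /* Old big-endian mapping: mirror across the whole vector register. */
    #define FPR_IDX_OLD(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx))
    /* New big-endian mapping: mirror only within a 64-bit doubleword. */
    #define FPR_IDX_NEW(width, idx) ((idx) ^ ((64 / (width)) - 1))

    int main(void)
    {
    	int idx;

    	for (idx = 0; idx < 2; idx++)
    		printf("width=64 idx=%d: old=%d new=%d\n",
    		       idx, FPR_IDX_OLD(64, idx), FPR_IDX_NEW(64, idx));
    	for (idx = 0; idx < 4; idx++)
    		printf("width=32 idx=%d: old=%d new=%d\n",
    		       idx, FPR_IDX_OLD(32, idx), FPR_IDX_NEW(32, idx));
    	return 0;
    }
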
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index 2c04b6d..6985eb5 100644
--- a/arch/mips/include/uapi/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -36,77 +36,85 @@ struct kvm_regs {
/*
* for KVM_GET_FPU and KVM_SET_FPU
- *
- * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
- * are zero filled.
*/
struct kvm_fpu {
- __u64 fpr[32];
- __u32 fir;
- __u32 fccr;
- __u32 fexr;
- __u32 fenr;
- __u32 fcsr;
- __u32 pad;
};
/*
- * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
+ * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
* registers. The id field is broken down as follows:
*
- * bits[2..0] - Register 'sel' index.
- * bits[7..3] - Register 'rd' index.
- * bits[15..8] - Must be zero.
- * bits[31..16] - 1 -> CP0 registers.
- * bits[51..32] - Must be zero.
* bits[63..52] - As per linux/kvm.h
+ * bits[51..32] - Must be zero.
+ * bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CP0 registers.
+ * bits[15..8] - Must be zero.
+ * bits[7..3] - Register 'rd' index.
+ * bits[2..0] - Register 'sel' index.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / MSA registers (see definitions below).
*
 * Other register sets may be added in the future. Each set would
* have its own identifier in bits[31..16].
- *
- * The registers defined in struct kvm_regs are also accessible, the
- * id values for these are below.
*/
-#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
-#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
-#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
-#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
-#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
-#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
-#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
-#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
-#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
-#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
-#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
-#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
-#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
-#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
-#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
-#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
-#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
-#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
-#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
-#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
-#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
-#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
-#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
-#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
-#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
-#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
-#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
-#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
-#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
-#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
-#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
-#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
-
-#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
-#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
-#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
-
-/* KVM specific control registers */
+#define KVM_REG_MIPS_GP (KVM_REG_MIPS | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_CP0 (KVM_REG_MIPS | 0x0000000000010000ULL)
+#define KVM_REG_MIPS_KVM (KVM_REG_MIPS | 0x0000000000020000ULL)
+#define KVM_REG_MIPS_FPU (KVM_REG_MIPS | 0x0000000000030000ULL)
+
+
+/*
+ * KVM_REG_MIPS_GP - General purpose registers from kvm_regs.
+ */
+
+#define KVM_REG_MIPS_R0 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_R1 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_MIPS_R2 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_MIPS_R3 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_MIPS_R4 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_MIPS_R5 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_MIPS_R6 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 6)
+#define KVM_REG_MIPS_R7 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 7)
+#define KVM_REG_MIPS_R8 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 8)
+#define KVM_REG_MIPS_R9 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 9)
+#define KVM_REG_MIPS_R10 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_MIPS_R11 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_MIPS_R12 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_MIPS_R13 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_MIPS_R14 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_MIPS_R15 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_MIPS_R16 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_MIPS_R17 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_MIPS_R18 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_MIPS_R19 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_MIPS_R20 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_MIPS_R21 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_MIPS_R22 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_MIPS_R23 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_MIPS_R24 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_MIPS_R25 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_MIPS_R26 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_MIPS_R27 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_MIPS_R28 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_MIPS_R29 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_MIPS_R30 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_MIPS_R31 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31)
+
+#define KVM_REG_MIPS_HI (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_MIPS_LO (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_MIPS_PC (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)
+
+
+/*
+ * KVM_REG_MIPS_KVM - KVM specific control registers.
+ */
/*
* CP0_Count control
@@ -118,8 +126,7 @@ struct kvm_fpu {
* safely without losing time or guest timer interrupts.
* Other: Reserved, do not change.
*/
-#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
- 0x20000 | 0)
+#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0)
#define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001
/*
@@ -131,15 +138,46 @@ struct kvm_fpu {
* emulated.
* Modifications to times in the future are rejected.
*/
-#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
- 0x20000 | 1)
+#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1)
/*
* CP0_Count rate in Hz
* Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
* discontinuities in CP0_Count.
*/
-#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
- 0x20000 | 2)
+#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2)
+
+
+/*
+ * KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers.
+ *
+ * bits[15..8] - Register subset (see definitions below).
+ * bits[7..5] - Must be zero.
+ * bits[4..0] - Register number within register subset.
+ */
+
+#define KVM_REG_MIPS_FPR (KVM_REG_MIPS_FPU | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_FCR (KVM_REG_MIPS_FPU | 0x0000000000000100ULL)
+#define KVM_REG_MIPS_MSACR (KVM_REG_MIPS_FPU | 0x0000000000000200ULL)
+
+/*
+ * KVM_REG_MIPS_FPR - Floating point / Vector registers.
+ */
+#define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n))
+#define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n))
+#define KVM_REG_MIPS_VEC_128(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n))
+
+/*
+ * KVM_REG_MIPS_FCR - Floating point control registers.
+ */
+#define KVM_REG_MIPS_FCR_IR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 0)
+#define KVM_REG_MIPS_FCR_CSR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31)
+
+/*
+ * KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers.
+ */
+#define KVM_REG_MIPS_MSA_IR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 0)
+#define KVM_REG_MIPS_MSA_CSR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 1)
+
/*
* KVM MIPS specific structures and definitions
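
With the id space split into register sets, reading a register from userspace composes an id from the definitions above and passes it through the ONE_REG ioctls. A hedged sketch (vcpu_fd is assumed to come from KVM_CREATE_VCPU; error handling elided):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Fetch one 64-bit guest register through KVM_GET_ONE_REG. */
    static int get_reg_u64(int vcpu_fd, uint64_t id, uint64_t *val)
    {
    	struct kvm_one_reg reg = {
    		.id   = id,
    		.addr = (uintptr_t)val,
    	};

    	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }

    /* e.g.: get_reg_u64(vcpu_fd, KVM_REG_MIPS_PC, &pc);
     *       get_reg_u64(vcpu_fd, KVM_REG_MIPS_FPR_64(0), &fpr0); */
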
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 750d67a..e59fd7c 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -167,72 +167,6 @@ void output_thread_fpu_defines(void)
OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
- /* the least significant 64 bits of each FP register */
- OFFSET(THREAD_FPR0_LS64, task_struct,
- thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR1_LS64, task_struct,
- thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR2_LS64, task_struct,
- thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR3_LS64, task_struct,
- thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR4_LS64, task_struct,
- thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR5_LS64, task_struct,
- thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR6_LS64, task_struct,
- thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR7_LS64, task_struct,
- thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR8_LS64, task_struct,
- thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR9_LS64, task_struct,
- thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR10_LS64, task_struct,
- thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR11_LS64, task_struct,
- thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR12_LS64, task_struct,
- thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR13_LS64, task_struct,
- thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR14_LS64, task_struct,
- thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR15_LS64, task_struct,
- thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR16_LS64, task_struct,
- thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR17_LS64, task_struct,
- thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR18_LS64, task_struct,
- thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR19_LS64, task_struct,
- thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR20_LS64, task_struct,
- thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR21_LS64, task_struct,
- thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR22_LS64, task_struct,
- thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR23_LS64, task_struct,
- thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR24_LS64, task_struct,
- thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR25_LS64, task_struct,
- thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR26_LS64, task_struct,
- thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR27_LS64, task_struct,
- thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR28_LS64, task_struct,
- thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR29_LS64, task_struct,
- thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR30_LS64, task_struct,
- thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR31_LS64, task_struct,
- thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
-
OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
BLANK();
@@ -470,6 +404,45 @@ void output_kvm_defines(void)
OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+ BLANK();
+
+ OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
+ OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
+ OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]);
+ OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]);
+ OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]);
+ OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]);
+ OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]);
+ OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]);
+ OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]);
+ OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]);
+ OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]);
+ OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]);
+ OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]);
+ OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]);
+ OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]);
+ OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]);
+ OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]);
+ OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]);
+ OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]);
+ OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]);
+ OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]);
+ OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]);
+ OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]);
+ OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]);
+ OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]);
+ OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]);
+ OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]);
+ OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]);
+ OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]);
+ OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]);
+ OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]);
+ OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]);
+
+ OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
+ OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
+ BLANK();
+
OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 2ebaabe..af42e70 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -360,12 +360,15 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set mips1
SET_HARDFLOAT
cfc1 a1, fcr31
- li a2, ~(0x3f << 12)
- and a2, a1
- ctc1 a2, fcr31
.set pop
- TRACE_IRQS_ON
- STI
+ CLI
+ TRACE_IRQS_OFF
+ .endm
+
+ .macro __build_clear_msa_fpe
+ _cfcmsa a1, MSA_CSR
+ CLI
+ TRACE_IRQS_OFF
.endm
.macro __build_clear_ade
@@ -426,7 +429,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER cpu cpu sti silent /* #11 */
BUILD_HANDLER ov ov sti silent /* #12 */
BUILD_HANDLER tr tr sti silent /* #13 */
- BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */
+ BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */
BUILD_HANDLER ftlb ftlb none silent /* #16 */
BUILD_HANDLER msa msa sti silent /* #21 */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 5104528..7da6e32 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -46,6 +46,26 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
+static void init_fp_ctx(struct task_struct *target)
+{
+ /* If FP has been used then the target already has context */
+ if (tsk_used_math(target))
+ return;
+
+ /* Begin with data registers set to all 1s... */
+ memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
+
+ /* ...and FCSR zeroed */
+ target->thread.fpu.fcr31 = 0;
+
+ /*
+ * Record that the target has "used" math, such that the context
+ * just initialised, and any modifications made by the caller,
+ * aren't discarded.
+ */
+ set_stopped_child_used_math(target);
+}
+
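
init_fp_ctx() above leans on memset() truncating its value argument to an unsigned char, so ~0 stores 0xff into every byte and each FP register initially reads back as all 1s. A quick standalone check:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t fpr[2];

    	/* memset's int argument is converted to unsigned char, so ~0
    	 * fills every byte with 0xff. */
    	memset(fpr, ~0, sizeof(fpr));
    	printf("%#llx\n", (unsigned long long)fpr[0]);
    	/* prints 0xffffffffffffffff */
    	return 0;
    }
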
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -142,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
if (!access_ok(VERIFY_READ, data, 33 * 8))
return -EIO;
+ init_fp_ctx(child);
fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++) {
@@ -439,6 +460,8 @@ static int fpr_set(struct task_struct *target,
/* XXX fcr31 */
+ init_fp_ctx(target);
+
if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu,
@@ -660,12 +683,7 @@ long arch_ptrace(struct task_struct *child, long request,
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);
- if (!tsk_used_math(child)) {
- /* FP not yet used */
- memset(&child->thread.fpu, ~0,
- sizeof(child->thread.fpu));
- child->thread.fpu.fcr31 = 0;
- }
+ init_fp_ctx(child);
#ifdef CONFIG_32BIT
if (test_thread_flag(TIF_32BIT_FPREGS)) {
/*
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 676c503..1d88af2 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -34,7 +34,6 @@
.endm
.set noreorder
- .set MIPS_ISA_ARCH_LEVEL_RAW
LEAF(_save_fp_context)
.set push
@@ -103,6 +102,7 @@ LEAF(_save_fp_context)
/* Save 32-bit process floating point context */
LEAF(_save_fp_context32)
.set push
+ .set MIPS_ISA_ARCH_LEVEL_RAW
SET_HARDFLOAT
cfc1 t1, fcr31
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 33984c0..5b4d711 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -701,6 +701,13 @@ asmlinkage void do_ov(struct pt_regs *regs)
int process_fpemu_return(int sig, void __user *fault_addr)
{
+ /*
+ * We can't allow the emulated instruction to leave any of the cause
+ * bits set in FCSR. If they were then the kernel would take an FP
+ * exception when restoring FP context.
+ */
+ current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+
if (sig == SIGSEGV || sig == SIGBUS) {
struct siginfo si = {0};
si.si_addr = fault_addr;
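
The hoisted masking clears the six FCSR cause bits before FP context can be restored; any cause bit left set would raise a fresh FP exception on the kernel's own ctc1. A standalone illustration (FPU_CSR_ALL_X is 0x0003f000 in the MIPS headers; the sample FCSR value is arbitrary):

    #include <stdio.h>

    #define FPU_CSR_ALL_X 0x0003f000	/* FCSR cause bits, bits 17..12 */

    int main(void)
    {
    	unsigned int fcr31 = 0x0001f07c;	/* arbitrary sample */

    	fcr31 &= ~FPU_CSR_ALL_X;	/* drop pending cause bits */
    	printf("%#x\n", fcr31);		/* prints 0x7c */
    	return 0;
    }
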
@@ -781,6 +788,11 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
SIGFPE) == NOTIFY_STOP)
goto out;
+
+ /* Clear FCSR.Cause before enabling interrupts */
+ write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+ local_irq_enable();
+
die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) {
@@ -804,18 +816,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- /*
- * We can't allow the emulated instruction to leave any of
- * the cause bit set in $fcr31.
- */
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ /* If something went wrong, signal */
+ process_fpemu_return(sig, fault_addr);
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
- /* If something went wrong, signal */
- process_fpemu_return(sig, fault_addr);
-
goto out;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
@@ -1392,13 +1398,22 @@ out:
exception_exit(prev_state);
}
-asmlinkage void do_msa_fpe(struct pt_regs *regs)
+asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
enum ctx_state prev_state;
prev_state = exception_enter();
+ if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
+ regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
+ goto out;
+
+ /* Clear MSACSR.Cause before enabling interrupts */
+ write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
+ local_irq_enable();
+
die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
force_sig(SIGFPE, current);
+out:
exception_exit(prev_state);
}
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 401fe02..637ebbe 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -1,13 +1,15 @@
# Makefile for KVM support for MIPS
#
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
-kvm-objs := $(common-objs) mips.o emulate.o locore.o \
+common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
+
+kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
interrupt.o stats.o commpage.o \
- dyntrans.o trap_emul.o
+ dyntrans.o trap_emul.o fpu.o
obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index fb3e8df..6230f37 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -884,6 +884,84 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+/**
+ * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config1 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = 0;
+
+ /* Permit FPU to be present if FPU is supported */
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
+ mask |= MIPS_CONF1_FP;
+
+ return mask;
+}
+
+/**
+ * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config3 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
+{
+ /* Config4 is optional */
+ unsigned int mask = MIPS_CONF_M;
+
+ /* Permit MSA to be present if MSA is supported */
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+ mask |= MIPS_CONF3_MSA;
+
+ return mask;
+}
+
+/**
+ * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config4 CP0
+ * register, by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
+{
+ /* Config5 is optional */
+ return MIPS_CONF_M;
+}
+
+/**
+ * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config5 CP0
+ * register, by the guest itself.
+ */
+unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = 0;
+
+ /* Permit MSAEn changes if MSA supported and enabled */
+ if (kvm_mips_guest_has_msa(&vcpu->arch))
+ mask |= MIPS_CONF5_MSAEN;
+
+ /*
+ * Permit guest FPU mode changes if FPU is enabled and the relevant
+	 * feature exists according to the FIR register.
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+ if (cpu_has_fre)
+ mask |= MIPS_CONF5_FRE;
+ /* We don't support UFR or UFE */
+ }
+
+ return mask;
+}
+
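
These wrmask helpers feed a masked-write pattern used by the mtc0 emulation below: change = (val ^ old) & wrmask, then val = old ^ change, which lets writable bits take the guest's value while read-only bits keep their old contents. A standalone sketch:

    #include <stdio.h>

    /* Merge a guest write where only `wrmask` bits may change. */
    static unsigned int masked_write(unsigned int old_val, unsigned int val,
    				     unsigned int wrmask)
    {
    	unsigned int change = (val ^ old_val) & wrmask;

    	return old_val ^ change;
    }

    int main(void)
    {
    	/* Only bit 0 writable: writing 0xff flips just bit 0. */
    	printf("%#x\n", masked_write(0x10, 0xff, 0x1));	/* 0x11 */
    	return 0;
    }
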
enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
uint32_t cause, struct kvm_run *run,
struct kvm_vcpu *vcpu)
@@ -1021,18 +1099,114 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_mips_write_compare(vcpu,
vcpu->arch.gprs[rt]);
} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
- kvm_write_c0_guest_status(cop0,
- vcpu->arch.gprs[rt]);
+ unsigned int old_val, val, change;
+
+ old_val = kvm_read_c0_guest_status(cop0);
+ val = vcpu->arch.gprs[rt];
+ change = val ^ old_val;
+
+ /* Make sure that the NMI bit is never set */
+ val &= ~ST0_NMI;
+
+ /*
+ * Don't allow CU1 or FR to be set unless FPU
+ * capability enabled and exists in guest
+ * configuration.
+ */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ val &= ~(ST0_CU1 | ST0_FR);
+
+ /*
+ * Also don't allow FR to be set if host doesn't
+ * support it.
+ */
+ if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+ val &= ~ST0_FR;
+
+
+ /* Handle changes in FPU mode */
+ preempt_disable();
+
+ /*
+ * FPU and Vector register state is made
+ * UNPREDICTABLE by a change of FR, so don't
+ * even bother saving it.
+ */
+ if (change & ST0_FR)
+ kvm_drop_fpu(vcpu);
+
+ /*
+ * If MSA state is already live, it is undefined
+ * how it interacts with FR=0 FPU state, and we
+ * don't want to hit reserved instruction
+ * exceptions trying to save the MSA state later
+ * when CU=1 && FR=1, so play it safe and save
+ * it first.
+ */
+ if (change & ST0_CU1 && !(val & ST0_FR) &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ kvm_lose_fpu(vcpu);
+
/*
- * Make sure that CU1 and NMI bits are
- * never set
+ * Propagate CU1 (FPU enable) changes
+ * immediately if the FPU context is already
+ * loaded. When disabling we leave the context
+ * loaded so it can be quickly enabled again in
+ * the near future.
*/
- kvm_clear_c0_guest_status(cop0,
- (ST0_CU1 | ST0_NMI));
+ if (change & ST0_CU1 &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ change_c0_status(ST0_CU1, val);
+
+ preempt_enable();
+
+ kvm_write_c0_guest_status(cop0, val);
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- kvm_mips_trans_mtc0(inst, opc, vcpu);
+ /*
+ * If FPU present, we need CU1/FR bits to take
+ * effect fairly soon.
+ */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
+ } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
+ unsigned int old_val, val, change, wrmask;
+
+ old_val = kvm_read_c0_guest_config5(cop0);
+ val = vcpu->arch.gprs[rt];
+
+ /* Only a few bits are writable in Config5 */
+ wrmask = kvm_mips_config5_wrmask(vcpu);
+ change = (val ^ old_val) & wrmask;
+ val = old_val ^ change;
+
+
+ /* Handle changes in FPU/MSA modes */
+ preempt_disable();
+
+ /*
+ * Propagate FRE changes immediately if the FPU
+ * context is already loaded.
+ */
+ if (change & MIPS_CONF5_FRE &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ change_c0_config5(MIPS_CONF5_FRE, val);
+
+ /*
+ * Propagate MSAEn changes immediately if the
+ * MSA context is already loaded. When disabling
+ * we leave the context loaded so it can be
+ * quickly enabled again in the near future.
+ */
+ if (change & MIPS_CONF5_MSAEN &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ change_c0_config5(MIPS_CONF5_MSAEN,
+ val);
+
+ preempt_enable();
+
+ kvm_write_c0_guest_config5(cop0, val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
uint32_t old_cause, new_cause;
@@ -1970,6 +2144,146 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
return er;
}
+enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_TRAP << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver TRAP when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_MSAFPE << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_FPE << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver FPE when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_MSADIS << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver MSADIS when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
/* ll/sc, rdhwr, sync emulation */
#define OPCODE 0xfc000000
@@ -2176,6 +2490,10 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
case T_SYSCALL:
case T_BREAK:
case T_RES_INST:
+ case T_TRAP:
+ case T_MSAFPE:
+ case T_FPE:
+ case T_MSADIS:
break;
case T_COP_UNUSABLE:
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
new file mode 100644
index 0000000..531fbf5
--- /dev/null
+++ b/arch/mips/kvm/fpu.S
@@ -0,0 +1,122 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * FPU context handling code for KVM.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+
+ .set noreorder
+ .set noat
+
+LEAF(__kvm_save_fpu)
+ .set push
+ .set mips64r2
+ SET_HARDFLOAT
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5 # is Status.FR set?
+ bgez t0, 1f # no: skip odd doubles
+ nop
+ sdc1 $f1, VCPU_FPR1(a0)
+ sdc1 $f3, VCPU_FPR3(a0)
+ sdc1 $f5, VCPU_FPR5(a0)
+ sdc1 $f7, VCPU_FPR7(a0)
+ sdc1 $f9, VCPU_FPR9(a0)
+ sdc1 $f11, VCPU_FPR11(a0)
+ sdc1 $f13, VCPU_FPR13(a0)
+ sdc1 $f15, VCPU_FPR15(a0)
+ sdc1 $f17, VCPU_FPR17(a0)
+ sdc1 $f19, VCPU_FPR19(a0)
+ sdc1 $f21, VCPU_FPR21(a0)
+ sdc1 $f23, VCPU_FPR23(a0)
+ sdc1 $f25, VCPU_FPR25(a0)
+ sdc1 $f27, VCPU_FPR27(a0)
+ sdc1 $f29, VCPU_FPR29(a0)
+ sdc1 $f31, VCPU_FPR31(a0)
+1: sdc1 $f0, VCPU_FPR0(a0)
+ sdc1 $f2, VCPU_FPR2(a0)
+ sdc1 $f4, VCPU_FPR4(a0)
+ sdc1 $f6, VCPU_FPR6(a0)
+ sdc1 $f8, VCPU_FPR8(a0)
+ sdc1 $f10, VCPU_FPR10(a0)
+ sdc1 $f12, VCPU_FPR12(a0)
+ sdc1 $f14, VCPU_FPR14(a0)
+ sdc1 $f16, VCPU_FPR16(a0)
+ sdc1 $f18, VCPU_FPR18(a0)
+ sdc1 $f20, VCPU_FPR20(a0)
+ sdc1 $f22, VCPU_FPR22(a0)
+ sdc1 $f24, VCPU_FPR24(a0)
+ sdc1 $f26, VCPU_FPR26(a0)
+ sdc1 $f28, VCPU_FPR28(a0)
+ jr ra
+ sdc1 $f30, VCPU_FPR30(a0)
+ .set pop
+ END(__kvm_save_fpu)
+
+LEAF(__kvm_restore_fpu)
+ .set push
+ .set mips64r2
+ SET_HARDFLOAT
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5 # is Status.FR set?
+ bgez t0, 1f # no: skip odd doubles
+ nop
+ ldc1 $f1, VCPU_FPR1(a0)
+ ldc1 $f3, VCPU_FPR3(a0)
+ ldc1 $f5, VCPU_FPR5(a0)
+ ldc1 $f7, VCPU_FPR7(a0)
+ ldc1 $f9, VCPU_FPR9(a0)
+ ldc1 $f11, VCPU_FPR11(a0)
+ ldc1 $f13, VCPU_FPR13(a0)
+ ldc1 $f15, VCPU_FPR15(a0)
+ ldc1 $f17, VCPU_FPR17(a0)
+ ldc1 $f19, VCPU_FPR19(a0)
+ ldc1 $f21, VCPU_FPR21(a0)
+ ldc1 $f23, VCPU_FPR23(a0)
+ ldc1 $f25, VCPU_FPR25(a0)
+ ldc1 $f27, VCPU_FPR27(a0)
+ ldc1 $f29, VCPU_FPR29(a0)
+ ldc1 $f31, VCPU_FPR31(a0)
+1: ldc1 $f0, VCPU_FPR0(a0)
+ ldc1 $f2, VCPU_FPR2(a0)
+ ldc1 $f4, VCPU_FPR4(a0)
+ ldc1 $f6, VCPU_FPR6(a0)
+ ldc1 $f8, VCPU_FPR8(a0)
+ ldc1 $f10, VCPU_FPR10(a0)
+ ldc1 $f12, VCPU_FPR12(a0)
+ ldc1 $f14, VCPU_FPR14(a0)
+ ldc1 $f16, VCPU_FPR16(a0)
+ ldc1 $f18, VCPU_FPR18(a0)
+ ldc1 $f20, VCPU_FPR20(a0)
+ ldc1 $f22, VCPU_FPR22(a0)
+ ldc1 $f24, VCPU_FPR24(a0)
+ ldc1 $f26, VCPU_FPR26(a0)
+ ldc1 $f28, VCPU_FPR28(a0)
+ jr ra
+ ldc1 $f30, VCPU_FPR30(a0)
+ .set pop
+ END(__kvm_restore_fpu)
+
+LEAF(__kvm_restore_fcsr)
+ .set push
+ SET_HARDFLOAT
+ lw t0, VCPU_FCR31(a0)
+ /*
+ * The ctc1 must stay at this offset in __kvm_restore_fcsr.
+ * See kvm_mips_csr_die_notify() which handles t0 containing a value
+ * which triggers an FP Exception, which must be stepped over and
+ * ignored since the set cause bits must remain there for the guest.
+ */
+ ctc1 t0, fcr31
+ jr ra
+ nop
+ .set pop
+ END(__kvm_restore_fcsr)
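
Why restoring FCR31 can trap at all: FCSR holds both exception cause bits and enable bits, and a ctc1 that writes a cause bit whose enable is also set (or the unimplemented-operation cause bit, which has no enable) raises an FP exception on the spot. A minimal sketch of that predicate, assuming the standard MIPS FCSR layout (Cause in bits 17:12, Enables in bits 11:7):

	/* Illustrative only: would writing this value to FCSR trap immediately? */
	static int fcsr_write_would_trap(unsigned int fcr31)
	{
		unsigned int cause  = (fcr31 >> 12) & 0x3f;	/* E,V,Z,O,U,I */
		unsigned int enable = (fcr31 >>  7) & 0x1f;	/* V,Z,O,U,I   */

		/* E (bit 17) always traps; the rest trap when their enable is set */
		return (cause >> 5) | (cause & enable);
	}

This is exactly the situation the die notifier referenced in the comment above must step over.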
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 4a68b17..c567240 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -36,6 +36,8 @@
#define PT_HOST_USERLOCAL PT_EPC
#define CP0_DDATA_LO $28,3
+#define CP0_CONFIG3 $16,3
+#define CP0_CONFIG5 $16,5
#define CP0_EBASE $15,1
#define CP0_INTCTL $12,1
@@ -353,6 +355,42 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
LONG_L k0, VCPU_HOST_EBASE(k1)
mtc0 k0,CP0_EBASE
+ /*
+ * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
+ * trigger FPE for pending exceptions.
+ */
+ .set at
+ and v1, v0, ST0_CU1
+ beqz v1, 1f
+ nop
+ .set push
+ SET_HARDFLOAT
+ cfc1 t0, fcr31
+ sw t0, VCPU_FCR31(k1)
+ ctc1 zero,fcr31
+ .set pop
+ .set noat
+1:
+
+#ifdef CONFIG_CPU_HAS_MSA
+ /*
+ * If MSA is enabled, save MSACSR and clear it so that later
+ * instructions don't trigger MSAFPE for pending exceptions.
+ */
+ mfc0 t0, CP0_CONFIG3
+ ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
+ beqz t0, 1f
+ nop
+ mfc0 t0, CP0_CONFIG5
+ ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
+ beqz t0, 1f
+ nop
+ _cfcmsa t0, MSA_CSR
+ sw t0, VCPU_MSA_CSR(k1)
+ _ctcmsa MSA_CSR, zero
+1:
+#endif
+
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
.set at
and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index c9eccf5..bb68e8d 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
@@ -48,6 +49,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
+ { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
+ { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
+ { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
+ { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
@@ -504,10 +509,13 @@ static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_CP0_STATUS,
KVM_REG_MIPS_CP0_CAUSE,
KVM_REG_MIPS_CP0_EPC,
+ KVM_REG_MIPS_CP0_PRID,
KVM_REG_MIPS_CP0_CONFIG,
KVM_REG_MIPS_CP0_CONFIG1,
KVM_REG_MIPS_CP0_CONFIG2,
KVM_REG_MIPS_CP0_CONFIG3,
+ KVM_REG_MIPS_CP0_CONFIG4,
+ KVM_REG_MIPS_CP0_CONFIG5,
KVM_REG_MIPS_CP0_CONFIG7,
KVM_REG_MIPS_CP0_ERROREPC,
@@ -520,10 +528,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
int ret;
s64 v;
+ s64 vs[2];
+ unsigned int idx;
switch (reg->id) {
+ /* General purpose registers */
case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
break;
@@ -537,6 +549,67 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
v = (long)vcpu->arch.pc;
break;
+ /* Floating point registers */
+ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+ /* Odd singles in top of even double when FR=0 */
+ if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+ v = get_fpr32(&fpu->fpr[idx], 0);
+ else
+ v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
+ break;
+ case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+ /* Can't access odd doubles in FR=0 mode */
+ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ v = get_fpr64(&fpu->fpr[idx], 0);
+ break;
+ case KVM_REG_MIPS_FCR_IR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ v = boot_cpu_data.fpu_id;
+ break;
+ case KVM_REG_MIPS_FCR_CSR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ v = fpu->fcr31;
+ break;
+
+ /* MIPS SIMD Architecture (MSA) registers */
+ case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ /* Can't access MSA registers in FR=0 mode */
+ if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ /* least significant byte first */
+ vs[0] = get_fpr64(&fpu->fpr[idx], 0);
+ vs[1] = get_fpr64(&fpu->fpr[idx], 1);
+#else
+ /* most significant byte first */
+ vs[0] = get_fpr64(&fpu->fpr[idx], 1);
+ vs[1] = get_fpr64(&fpu->fpr[idx], 0);
+#endif
+ break;
+ case KVM_REG_MIPS_MSA_IR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ v = boot_cpu_data.msa_id;
+ break;
+ case KVM_REG_MIPS_MSA_CSR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ v = fpu->msacsr;
+ break;
+
+ /* Co-processor 0 registers */
case KVM_REG_MIPS_CP0_INDEX:
v = (long)kvm_read_c0_guest_index(cop0);
break;
@@ -573,8 +646,8 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_EPC:
v = (long)kvm_read_c0_guest_epc(cop0);
break;
- case KVM_REG_MIPS_CP0_ERROREPC:
- v = (long)kvm_read_c0_guest_errorepc(cop0);
+ case KVM_REG_MIPS_CP0_PRID:
+ v = (long)kvm_read_c0_guest_prid(cop0);
break;
case KVM_REG_MIPS_CP0_CONFIG:
v = (long)kvm_read_c0_guest_config(cop0);
@@ -588,9 +661,18 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_CONFIG3:
v = (long)kvm_read_c0_guest_config3(cop0);
break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ v = (long)kvm_read_c0_guest_config4(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ v = (long)kvm_read_c0_guest_config5(cop0);
+ break;
case KVM_REG_MIPS_CP0_CONFIG7:
v = (long)kvm_read_c0_guest_config7(cop0);
break;
+ case KVM_REG_MIPS_CP0_ERROREPC:
+ v = (long)kvm_read_c0_guest_errorepc(cop0);
+ break;
/* registers to be handled specially */
case KVM_REG_MIPS_CP0_COUNT:
case KVM_REG_MIPS_COUNT_CTL:
@@ -612,6 +694,10 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
u32 v32 = (u32)v;
return put_user(v32, uaddr32);
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+
+ return copy_to_user(uaddr, vs, 16);
} else {
return -EINVAL;
}
@@ -621,7 +707,10 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- u64 v;
+ struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+ s64 v;
+ s64 vs[2];
+ unsigned int idx;
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
@@ -635,11 +724,16 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
if (get_user(v32, uaddr32) != 0)
return -EFAULT;
v = (s64)v32;
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+
+ return copy_from_user(vs, uaddr, 16);
} else {
return -EINVAL;
}
switch (reg->id) {
+ /* General purpose registers */
case KVM_REG_MIPS_R0:
/* Silently ignore requests to set $0 */
break;
@@ -656,6 +750,64 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
vcpu->arch.pc = v;
break;
+ /* Floating point registers */
+ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+ /* Odd singles in top of even double when FR=0 */
+ if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+ set_fpr32(&fpu->fpr[idx], 0, v);
+ else
+ set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
+ break;
+ case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+ /* Can't access odd doubles in FR=0 mode */
+ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ set_fpr64(&fpu->fpr[idx], 0, v);
+ break;
+ case KVM_REG_MIPS_FCR_IR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ /* Read-only */
+ break;
+ case KVM_REG_MIPS_FCR_CSR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ fpu->fcr31 = v;
+ break;
+
+ /* MIPS SIMD Architecture (MSA) registers */
+ case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ /* least significant byte first */
+ set_fpr64(&fpu->fpr[idx], 0, vs[0]);
+ set_fpr64(&fpu->fpr[idx], 1, vs[1]);
+#else
+ /* most significant byte first */
+ set_fpr64(&fpu->fpr[idx], 1, vs[0]);
+ set_fpr64(&fpu->fpr[idx], 0, vs[1]);
+#endif
+ break;
+ case KVM_REG_MIPS_MSA_IR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ /* Read-only */
+ break;
+ case KVM_REG_MIPS_MSA_CSR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ fpu->msacsr = v;
+ break;
+
+ /* Co-processor 0 registers */
case KVM_REG_MIPS_CP0_INDEX:
kvm_write_c0_guest_index(cop0, v);
break;
@@ -686,6 +838,9 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_EPC:
kvm_write_c0_guest_epc(cop0, v);
break;
+ case KVM_REG_MIPS_CP0_PRID:
+ kvm_write_c0_guest_prid(cop0, v);
+ break;
case KVM_REG_MIPS_CP0_ERROREPC:
kvm_write_c0_guest_errorepc(cop0, v);
break;
@@ -693,6 +848,12 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_COUNT:
case KVM_REG_MIPS_CP0_COMPARE:
case KVM_REG_MIPS_CP0_CAUSE:
+ case KVM_REG_MIPS_CP0_CONFIG:
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ case KVM_REG_MIPS_CP0_CONFIG5:
case KVM_REG_MIPS_COUNT_CTL:
case KVM_REG_MIPS_COUNT_RESUME:
case KVM_REG_MIPS_COUNT_HZ:
@@ -703,6 +864,33 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
return 0;
}
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+ struct kvm_enable_cap *cap)
+{
+ int r = 0;
+
+ if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
+ return -EINVAL;
+ if (cap->flags)
+ return -EINVAL;
+ if (cap->args[0])
+ return -EINVAL;
+
+ switch (cap->cap) {
+ case KVM_CAP_MIPS_FPU:
+ vcpu->arch.fpu_enabled = true;
+ break;
+ case KVM_CAP_MIPS_MSA:
+ vcpu->arch.msa_enabled = true;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
@@ -760,6 +948,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
break;
}
+ case KVM_ENABLE_CAP: {
+ struct kvm_enable_cap cap;
+
+ r = -EFAULT;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ goto out;
+ r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+ break;
+ }
default:
r = -ENOIOCTLCMD;
}
@@ -868,11 +1065,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
switch (ext) {
case KVM_CAP_ONE_REG:
+ case KVM_CAP_ENABLE_CAP:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
+ case KVM_CAP_MIPS_FPU:
+ r = !!cpu_has_fpu;
+ break;
+ case KVM_CAP_MIPS_MSA:
+ /*
+ * We don't support MSA vector partitioning yet:
+ * 1) It would require explicit support which can't be tested
+ * yet due to lack of support in current hardware.
+ * 2) It extends the state that would need to be saved/restored
+ * by e.g. QEMU for migration.
+ *
+ * When vector partitioning hardware becomes available, support
+ * could be added by requiring a flag when enabling
+ * KVM_CAP_MIPS_MSA capability to indicate that userland knows
+ * to save/restore the appropriate extra state.
+ */
+ r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
+ break;
default:
r = 0;
break;
@@ -1119,6 +1335,30 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
ret = kvm_mips_callbacks->handle_break(vcpu);
break;
+ case T_TRAP:
+ ++vcpu->stat.trap_inst_exits;
+ trace_kvm_exit(vcpu, TRAP_INST_EXITS);
+ ret = kvm_mips_callbacks->handle_trap(vcpu);
+ break;
+
+ case T_MSAFPE:
+ ++vcpu->stat.msa_fpe_exits;
+ trace_kvm_exit(vcpu, MSA_FPE_EXITS);
+ ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
+ break;
+
+ case T_FPE:
+ ++vcpu->stat.fpe_exits;
+ trace_kvm_exit(vcpu, FPE_EXITS);
+ ret = kvm_mips_callbacks->handle_fpe(vcpu);
+ break;
+
+ case T_MSADIS:
+ ++vcpu->stat.msa_disabled_exits;
+ trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
+ ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
+ break;
+
default:
kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
@@ -1146,12 +1386,233 @@ skip_emul:
}
}
+ if (ret == RESUME_GUEST) {
+ /*
+ * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
+ * is live), restore FCR31 / MSACSR.
+ *
+	 * This must be done before returning to the guest exception
+	 * vector, as it may well cause an [MSA] FP exception if there
+	 * are pending exception bits unmasked (see
+	 * kvm_mips_csr_die_notify() for how that is handled).
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
+ read_c0_status() & ST0_CU1)
+ __kvm_restore_fcsr(&vcpu->arch);
+
+ if (kvm_mips_guest_has_msa(&vcpu->arch) &&
+ read_c0_config5() & MIPS_CONF5_MSAEN)
+ __kvm_restore_msacsr(&vcpu->arch);
+ }
+
/* Disable HTW before returning to guest or host */
htw_stop();
return ret;
}
+/* Enable FPU for guest and restore context */
+void kvm_own_fpu(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int sr, cfg5;
+
+ preempt_disable();
+
+ sr = kvm_read_c0_guest_status(cop0);
+
+ /*
+ * If MSA state is already live, it is undefined how it interacts with
+ * FR=0 FPU state, and we don't want to hit reserved instruction
+ * exceptions trying to save the MSA state later when CU=1 && FR=1, so
+ * play it safe and save it first.
+ *
+ * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
+ * get called when guest CU1 is set, however we can't trust the guest
+ * not to clobber the status register directly via the commpage.
+ */
+ if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ kvm_lose_fpu(vcpu);
+
+ /*
+	 * Enable FPU for the guest, setting FR and FRE according to the
+	 * guest context.
+ */
+ change_c0_status(ST0_CU1 | ST0_FR, sr);
+ if (cpu_has_fre) {
+ cfg5 = kvm_read_c0_guest_config5(cop0);
+ change_c0_config5(MIPS_CONF5_FRE, cfg5);
+ }
+ enable_fpu_hazard();
+
+ /* If guest FPU state not active, restore it now */
+ if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
+ __kvm_restore_fpu(&vcpu->arch);
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ }
+
+ preempt_enable();
+}
+
+#ifdef CONFIG_CPU_HAS_MSA
+/* Enable MSA for guest and restore context */
+void kvm_own_msa(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int sr, cfg5;
+
+ preempt_disable();
+
+ /*
+ * Enable FPU if enabled in guest, since we're restoring FPU context
+ * anyway. We set FR and FRE according to guest context.
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+ sr = kvm_read_c0_guest_status(cop0);
+
+ /*
+ * If FR=0 FPU state is already live, it is undefined how it
+ * interacts with MSA state, so play it safe and save it first.
+ */
+ if (!(sr & ST0_FR) &&
+ (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
+ KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
+ kvm_lose_fpu(vcpu);
+
+ change_c0_status(ST0_CU1 | ST0_FR, sr);
+ if (sr & ST0_CU1 && cpu_has_fre) {
+ cfg5 = kvm_read_c0_guest_config5(cop0);
+ change_c0_config5(MIPS_CONF5_FRE, cfg5);
+ }
+ }
+
+ /* Enable MSA for guest */
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+
+ switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
+ case KVM_MIPS_FPU_FPU:
+ /*
+ * Guest FPU state already loaded, only restore upper MSA state
+ */
+ __kvm_restore_msa_upper(&vcpu->arch);
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ break;
+ case 0:
+		/* Neither FPU nor MSA already active, restore full MSA state */
+ __kvm_restore_msa(&vcpu->arch);
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ if (kvm_mips_guest_has_fpu(&vcpu->arch))
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ break;
+ default:
+ break;
+ }
+
+ preempt_enable();
+}
+#endif
+
+/* Drop FPU & MSA without saving it */
+void kvm_drop_fpu(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ disable_msa();
+ vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
+ }
+ if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+ }
+ preempt_enable();
+}
+
+/* Save and disable FPU & MSA */
+void kvm_lose_fpu(struct kvm_vcpu *vcpu)
+{
+ /*
+	 * FPU & MSA get disabled in root context (hardware) when they are disabled
+ * in guest context (software), but the register state in the hardware
+ * may still be in use. This is why we explicitly re-enable the hardware
+ * before saving.
+ */
+
+ preempt_disable();
+ if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+
+ __kvm_save_msa(&vcpu->arch);
+
+ /* Disable MSA & FPU */
+ disable_msa();
+ if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
+ } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ set_c0_status(ST0_CU1);
+ enable_fpu_hazard();
+
+ __kvm_save_fpu(&vcpu->arch);
+ vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+
+ /* Disable FPU */
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ }
+ preempt_enable();
+}
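
For orientation, an illustrative summary of the vcpu->arch.fpu_inuse transitions the four routines above implement (a reading aid, not comment text from the patch):

	/*
	 *   kvm_own_fpu():  sets KVM_MIPS_FPU_FPU, restoring FPU registers if
	 *                   not live; live MSA state with FR=0 is saved first.
	 *   kvm_own_msa():  sets KVM_MIPS_FPU_MSA (plus _FPU when the guest has
	 *                   an FPU), restoring upper halves or full vectors.
	 *   kvm_drop_fpu(): clears both bits, discarding live state unsaved.
	 *   kvm_lose_fpu(): saves live state back into the vcpu, then disables.
	 */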
+
+/*
+ * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
+ * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
+ * exception if cause bits are set in the value being written.
+ */
+static int kvm_mips_csr_die_notify(struct notifier_block *self,
+ unsigned long cmd, void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+ unsigned long pc;
+
+ /* Only interested in FPE and MSAFPE */
+ if (cmd != DIE_FP && cmd != DIE_MSAFP)
+ return NOTIFY_DONE;
+
+ /* Return immediately if guest context isn't active */
+ if (!(current->flags & PF_VCPU))
+ return NOTIFY_DONE;
+
+ /* Should never get here from user mode */
+ BUG_ON(user_mode(regs));
+
+ pc = instruction_pointer(regs);
+ switch (cmd) {
+ case DIE_FP:
+ /* match 2nd instruction in __kvm_restore_fcsr */
+ if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
+ return NOTIFY_DONE;
+ break;
+ case DIE_MSAFP:
+ /* match 2nd/3rd instruction in __kvm_restore_msacsr */
+ if (!cpu_has_msa ||
+ pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
+ pc > (unsigned long)&__kvm_restore_msacsr + 8)
+ return NOTIFY_DONE;
+ break;
+ }
+
+	/* Step over the faulting instruction and continue executing */
+ instruction_pointer(regs) += 4;
+
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block kvm_mips_csr_die_notifier = {
+ .notifier_call = kvm_mips_csr_die_notify,
+};
+
int __init kvm_mips_init(void)
{
int ret;
@@ -1161,6 +1622,8 @@ int __init kvm_mips_init(void)
if (ret)
return ret;
+ register_die_notifier(&kvm_mips_csr_die_notifier);
+
/*
* On MIPS, kernel modules are executed from "mapped space", which
* requires TLBs. The TLB handling code is statically linked with
@@ -1173,7 +1636,6 @@ int __init kvm_mips_init(void)
kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
kvm_mips_is_error_pfn = is_error_pfn;
- pr_info("KVM/MIPS Initialized\n");
return 0;
}
@@ -1185,7 +1647,7 @@ void __exit kvm_mips_exit(void)
kvm_mips_release_pfn_clean = NULL;
kvm_mips_is_error_pfn = NULL;
- pr_info("KVM/MIPS unloaded\n");
+ unregister_die_notifier(&kvm_mips_csr_die_notifier);
}
module_init(kvm_mips_init);
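
From userspace the new interfaces compose in the usual KVM way: probe with KVM_CHECK_EXTENSION, opt in per vcpu with KVM_ENABLE_CAP, then move state with KVM_GET_ONE_REG/KVM_SET_ONE_REG. A hedged sketch (the register id macro comes from the MIPS uapi asm/kvm.h; error handling kept minimal):

	#include <linux/kvm.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	/* Illustrative only: enable the guest FPU and read back its FCSR. */
	static int setup_guest_fpu(int kvm_fd, int vcpu_fd, uint32_t *fcsr)
	{
		struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
		struct kvm_one_reg reg = {
			.id   = KVM_REG_MIPS_FCR_CSR,	/* 32-bit sized register */
			.addr = (uintptr_t)fcsr,
		};

		if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_FPU) <= 0)
			return -1;	/* host FPU unusable for guests */
		if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
			return -1;
		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}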
diff --git a/arch/mips/kvm/msa.S b/arch/mips/kvm/msa.S
new file mode 100644
index 0000000..d02f0c6
--- /dev/null
+++ b/arch/mips/kvm/msa.S
@@ -0,0 +1,161 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * MIPS SIMD Architecture (MSA) context handling code for KVM.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+
+ .set noreorder
+ .set noat
+
+LEAF(__kvm_save_msa)
+ st_d 0, VCPU_FPR0, a0
+ st_d 1, VCPU_FPR1, a0
+ st_d 2, VCPU_FPR2, a0
+ st_d 3, VCPU_FPR3, a0
+ st_d 4, VCPU_FPR4, a0
+ st_d 5, VCPU_FPR5, a0
+ st_d 6, VCPU_FPR6, a0
+ st_d 7, VCPU_FPR7, a0
+ st_d 8, VCPU_FPR8, a0
+ st_d 9, VCPU_FPR9, a0
+ st_d 10, VCPU_FPR10, a0
+ st_d 11, VCPU_FPR11, a0
+ st_d 12, VCPU_FPR12, a0
+ st_d 13, VCPU_FPR13, a0
+ st_d 14, VCPU_FPR14, a0
+ st_d 15, VCPU_FPR15, a0
+ st_d 16, VCPU_FPR16, a0
+ st_d 17, VCPU_FPR17, a0
+ st_d 18, VCPU_FPR18, a0
+ st_d 19, VCPU_FPR19, a0
+ st_d 20, VCPU_FPR20, a0
+ st_d 21, VCPU_FPR21, a0
+ st_d 22, VCPU_FPR22, a0
+ st_d 23, VCPU_FPR23, a0
+ st_d 24, VCPU_FPR24, a0
+ st_d 25, VCPU_FPR25, a0
+ st_d 26, VCPU_FPR26, a0
+ st_d 27, VCPU_FPR27, a0
+ st_d 28, VCPU_FPR28, a0
+ st_d 29, VCPU_FPR29, a0
+ st_d 30, VCPU_FPR30, a0
+ st_d 31, VCPU_FPR31, a0
+ jr ra
+ nop
+ END(__kvm_save_msa)
+
+LEAF(__kvm_restore_msa)
+ ld_d 0, VCPU_FPR0, a0
+ ld_d 1, VCPU_FPR1, a0
+ ld_d 2, VCPU_FPR2, a0
+ ld_d 3, VCPU_FPR3, a0
+ ld_d 4, VCPU_FPR4, a0
+ ld_d 5, VCPU_FPR5, a0
+ ld_d 6, VCPU_FPR6, a0
+ ld_d 7, VCPU_FPR7, a0
+ ld_d 8, VCPU_FPR8, a0
+ ld_d 9, VCPU_FPR9, a0
+ ld_d 10, VCPU_FPR10, a0
+ ld_d 11, VCPU_FPR11, a0
+ ld_d 12, VCPU_FPR12, a0
+ ld_d 13, VCPU_FPR13, a0
+ ld_d 14, VCPU_FPR14, a0
+ ld_d 15, VCPU_FPR15, a0
+ ld_d 16, VCPU_FPR16, a0
+ ld_d 17, VCPU_FPR17, a0
+ ld_d 18, VCPU_FPR18, a0
+ ld_d 19, VCPU_FPR19, a0
+ ld_d 20, VCPU_FPR20, a0
+ ld_d 21, VCPU_FPR21, a0
+ ld_d 22, VCPU_FPR22, a0
+ ld_d 23, VCPU_FPR23, a0
+ ld_d 24, VCPU_FPR24, a0
+ ld_d 25, VCPU_FPR25, a0
+ ld_d 26, VCPU_FPR26, a0
+ ld_d 27, VCPU_FPR27, a0
+ ld_d 28, VCPU_FPR28, a0
+ ld_d 29, VCPU_FPR29, a0
+ ld_d 30, VCPU_FPR30, a0
+ ld_d 31, VCPU_FPR31, a0
+ jr ra
+ nop
+ END(__kvm_restore_msa)
+
+ .macro kvm_restore_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ ld $1, \off(\base)
+ insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ lw $1, \off(\base)
+ insert_w \wr, 2
+ lw $1, (\off+4)(\base)
+ insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ lw $1, (\off+4)(\base)
+ insert_w \wr, 2
+ lw $1, \off(\base)
+ insert_w \wr, 3
+#endif
+ .set pop
+ .endm
+
+LEAF(__kvm_restore_msa_upper)
+ kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0
+ kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0
+ kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0
+ kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0
+ kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0
+ kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0
+ kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0
+ kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0
+ kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0
+ kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0
+ kvm_restore_msa_upper 10, VCPU_FPR10+8, a0
+ kvm_restore_msa_upper 11, VCPU_FPR11+8, a0
+ kvm_restore_msa_upper 12, VCPU_FPR12+8, a0
+ kvm_restore_msa_upper 13, VCPU_FPR13+8, a0
+ kvm_restore_msa_upper 14, VCPU_FPR14+8, a0
+ kvm_restore_msa_upper 15, VCPU_FPR15+8, a0
+ kvm_restore_msa_upper 16, VCPU_FPR16+8, a0
+ kvm_restore_msa_upper 17, VCPU_FPR17+8, a0
+ kvm_restore_msa_upper 18, VCPU_FPR18+8, a0
+ kvm_restore_msa_upper 19, VCPU_FPR19+8, a0
+ kvm_restore_msa_upper 20, VCPU_FPR20+8, a0
+ kvm_restore_msa_upper 21, VCPU_FPR21+8, a0
+ kvm_restore_msa_upper 22, VCPU_FPR22+8, a0
+ kvm_restore_msa_upper 23, VCPU_FPR23+8, a0
+ kvm_restore_msa_upper 24, VCPU_FPR24+8, a0
+ kvm_restore_msa_upper 25, VCPU_FPR25+8, a0
+ kvm_restore_msa_upper 26, VCPU_FPR26+8, a0
+ kvm_restore_msa_upper 27, VCPU_FPR27+8, a0
+ kvm_restore_msa_upper 28, VCPU_FPR28+8, a0
+ kvm_restore_msa_upper 29, VCPU_FPR29+8, a0
+ kvm_restore_msa_upper 30, VCPU_FPR30+8, a0
+ kvm_restore_msa_upper 31, VCPU_FPR31+8, a0
+ jr ra
+ nop
+ END(__kvm_restore_msa_upper)
+
+LEAF(__kvm_restore_msacsr)
+ lw t0, VCPU_MSA_CSR(a0)
+ /*
+ * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
+ * See kvm_mips_csr_die_notify() which handles t0 containing a value
+ * which triggers an MSA FP Exception, which must be stepped over and
+ * ignored since the set cause bits must remain there for the guest.
+ */
+ _ctcmsa MSA_CSR, t0
+ jr ra
+ nop
+ END(__kvm_restore_msacsr)
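
The restore-upper macro above leans on a layout assumption worth spelling out: each 128-bit vector register aliases the corresponding 64-bit FPU register as its low half, with the upper half stored 8 bytes further into the vcpu FPR array in host memory order. Sketched as a C view (an illustration of that assumption, not a declared ABI):

	#include <stdint.h>

	/* One save slot as the code above addresses it (VCPU_FPRn .. +15). */
	union vcpu_fpr_slot {
		uint64_t d[2];	/* d[0]: FPU double / vector bits 63:0 (offset +0)
				 * d[1]: vector bits 127:64 (offset +8) */
		uint32_t w[4];	/* which w[] feeds insert_w element 2 vs 3 flips
				 * between little and big endian, matching the
				 * #ifdef branches in kvm_restore_msa_upper */
	};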
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
index a74d602..888bb67 100644
--- a/arch/mips/kvm/stats.c
+++ b/arch/mips/kvm/stats.c
@@ -25,6 +25,10 @@ char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
"System Call",
"Reserved Inst",
"Break Inst",
+ "Trap Inst",
+ "MSA FPE",
+ "FPE",
+ "MSA Disabled",
"D-Cache Flushes",
};
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index b6beb0e..aed0ac2 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -733,6 +733,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
}
+	/* restore guest state to hardware registers */
+ kvm_mips_callbacks->vcpu_set_regs(vcpu);
+
local_irq_restore(flags);
}
@@ -751,6 +754,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->arch.preempt_entryhi = read_c0_entryhi();
vcpu->arch.last_sched_cpu = cpu;
+	/* save guest state from hardware registers */
+ kvm_mips_callbacks->vcpu_get_regs(vcpu);
+
if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
ASID_VERSION_MASK)) {
kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index fd7257b..d836ed5 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -39,16 +39,30 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
- er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
- else
+ if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+ /* FPU Unusable */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
+ (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
+ /*
+ * Unusable/no FPU in guest:
+ * deliver guest COP1 Unusable Exception
+ */
+ er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+ } else {
+ /* Restore FPU state */
+ kvm_own_fpu(vcpu);
+ er = EMULATE_DONE;
+ }
+ } else {
er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+ }
switch (er) {
case EMULATE_DONE:
@@ -330,6 +344,107 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
return ret;
}
+static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+/**
+ * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
+ * @vcpu: Virtual CPU context.
+ *
+ * Handle when the guest attempts to use MSA when it is disabled.
+ */
+static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
+ (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
+ /*
+		 * No MSA in guest, or FPU enabled but not in FR=1 mode:
+		 * deliver a guest Reserved Instruction exception
+ */
+ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+ } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
+ /* MSA disabled by guest, guest MSA disabled exception */
+ er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
+ } else {
+ /* Restore MSA/FPU state */
+ kvm_own_msa(vcpu);
+ er = EMULATE_DONE;
+ }
+
+ switch (er) {
+ case EMULATE_DONE:
+ ret = RESUME_GUEST;
+ break;
+
+ case EMULATE_FAIL:
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ default:
+ BUG();
+ }
+ return ret;
+}
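
Condensed, the handler above picks one of three outcomes (an illustrative summary, not comment text from the patch):

	/*
	 *   guest lacks MSA, or Status.CU1=1 with Status.FR=0
	 *                               -> Reserved Instruction exception to guest
	 *   guest Config5.MSAEn clear   -> MSA Disabled exception to guest
	 *   otherwise                   -> kvm_own_msa(): restore state, resume
	 */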
+
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
return 0;
@@ -351,8 +466,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
* guest will come up as expected, for now we simulate a MIPS 24kc
*/
kvm_write_c0_guest_prid(cop0, 0x00019300);
- kvm_write_c0_guest_config(cop0,
- MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ /* Have config1, Cacheable, noncoherent, write-back, write allocate */
+ kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
+ (0x1 << CP0C0_AR) |
(MMU_TYPE_R4000 << CP0C0_MT));
/* Read the cache characteristics from the host Config1 Register */
@@ -368,10 +484,18 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
(1 << CP0C1_WR) | (1 << CP0C1_CA));
kvm_write_c0_guest_config1(cop0, config1);
- kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
- /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
- kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
- (1 << CP0C3_ULRI));
+ /* Have config3, no tertiary/secondary caches implemented */
+ kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
+ /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */
+
+ /* Have config4, UserLocal */
+ kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);
+
+ /* Have config5 */
+ kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);
+
+ /* No config6 */
+ kvm_write_c0_guest_config5(cop0, 0);
/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
@@ -416,6 +540,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
int ret = 0;
+ unsigned int cur, change;
switch (reg->id) {
case KVM_REG_MIPS_CP0_COUNT:
@@ -444,6 +569,44 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
kvm_write_c0_guest_cause(cop0, v);
}
break;
+ case KVM_REG_MIPS_CP0_CONFIG:
+ /* read-only for now */
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ cur = kvm_read_c0_guest_config1(cop0);
+ change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config1(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ /* read-only for now */
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ cur = kvm_read_c0_guest_config3(cop0);
+ change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config3(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ cur = kvm_read_c0_guest_config4(cop0);
+ change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config4(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ cur = kvm_read_c0_guest_config5(cop0);
+ change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config5(cop0, v);
+ }
+ break;
case KVM_REG_MIPS_COUNT_CTL:
ret = kvm_mips_set_count_ctl(vcpu, v);
break;
@@ -459,6 +622,18 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
return ret;
}
+static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
+{
+ kvm_lose_fpu(vcpu);
+
+ return 0;
+}
+
+static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
/* exit handlers */
.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
@@ -470,6 +645,10 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.handle_syscall = kvm_trap_emul_handle_syscall,
.handle_res_inst = kvm_trap_emul_handle_res_inst,
.handle_break = kvm_trap_emul_handle_break,
+ .handle_trap = kvm_trap_emul_handle_trap,
+ .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
+ .handle_fpe = kvm_trap_emul_handle_fpe,
+ .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
.vm_init = kvm_trap_emul_vm_init,
.vcpu_init = kvm_trap_emul_vcpu_init,
@@ -483,6 +662,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.irq_clear = kvm_mips_irq_clear_cb,
.get_one_reg = kvm_trap_emul_get_one_reg,
.set_one_reg = kvm_trap_emul_set_one_reg,
+ .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
+ .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
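
The Config1/3/4/5 arms of kvm_trap_emul_set_one_reg() above repeat one read-modify-write idiom: XOR out the differing bits, mask to the writable subset, XOR back in. A hypothetical helper (not in the patch) makes the semantics explicit:

	/* Keep the read-only bits of @cur; take writable bits (@wrmask) from @val. */
	static inline unsigned int kvm_apply_wrmask(unsigned int cur,
						    unsigned int val,
						    unsigned int wrmask)
	{
		return cur ^ ((cur ^ val) & wrmask);
	}

With it, each case would collapse to kvm_write_c0_guest_configN(cop0, kvm_apply_wrmask(cur, v, kvm_mips_configN_wrmask(vcpu))).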
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index 3b7f65c..cf9b4633 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -75,11 +75,11 @@ static int rtctmp;
int proc_dolasatrtc(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- struct timespec ts;
+ struct timespec64 ts;
int r;
if (!write) {
- read_persistent_clock(&ts);
+ read_persistent_clock64(&ts);
rtctmp = ts.tv_sec;
/* check for time < 0 and set to 0 */
if (rtctmp < 0)
diff --git a/arch/mips/loongson/loongson-3/hpet.c b/arch/mips/loongson/loongson-3/hpet.c
index e898d68..5c21cd3 100644
--- a/arch/mips/loongson/loongson-3/hpet.c
+++ b/arch/mips/loongson/loongson-3/hpet.c
@@ -162,7 +162,7 @@ static irqreturn_t hpet_irq_handler(int irq, void *data)
static struct irqaction hpet_irq = {
.handler = hpet_irq_handler,
- .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
+ .flags = IRQF_NOBALANCING | IRQF_TIMER,
.name = "hpet",
};
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index f1baadd..5c81fdd 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -142,18 +142,26 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
addr0, len, pgoff, flags, DOWN);
}
+unsigned long arch_mmap_rnd(void)
+{
+ unsigned long rnd;
+
+ rnd = (unsigned long)get_random_int();
+ rnd <<= PAGE_SHIFT;
+ if (TASK_IS_32BIT_ADDR)
+ rnd &= 0xfffffful;
+ else
+ rnd &= 0xffffffful;
+
+ return rnd;
+}
+
void arch_pick_mmap_layout(struct mm_struct *mm)
{
unsigned long random_factor = 0UL;
- if (current->flags & PF_RANDOMIZE) {
- random_factor = get_random_int();
- random_factor = random_factor << PAGE_SHIFT;
- if (TASK_IS_32BIT_ADDR)
- random_factor &= 0xfffffful;
- else
- random_factor &= 0xffffffful;
- }
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
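
One property of the factored-out arch_mmap_rnd() above is easy to miss: the mask is applied after the page shift, so with 4 KiB pages the 32-bit case keeps only bits 12-23 random. A quick standalone check of the resulting range (assuming PAGE_SHIFT == 12):

	#include <stdio.h>

	int main(void)
	{
		unsigned long mask = 0xfffffful;	/* TASK_IS_32BIT_ADDR case */
		unsigned int page_shift = 12;		/* 4 KiB pages assumed */

		printf("distinct page-aligned offsets: %lu\n",
		       (mask >> page_shift) + 1);	/* 4096 */
		printf("randomized span: %lu MiB\n", (mask + 1) >> 20);	/* 16 */
		return 0;
	}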
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 1bf60b1..8bb13a4 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -94,27 +94,29 @@ static void pcibios_scanbus(struct pci_controller *hose)
pci_add_resource_offset(&resources, hose->io_resource, hose->io_offset);
bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose,
&resources);
- if (!bus)
- pci_free_resource_list(&resources);
-
hose->bus = bus;
need_domain_info = need_domain_info || hose->index;
hose->need_domain_info = need_domain_info;
- if (bus) {
- next_busno = bus->busn_res.end + 1;
- /* Don't allow 8-bit bus number overflow inside the hose -
- reserve some space for bridges. */
- if (next_busno > 224) {
- next_busno = 0;
- need_domain_info = 1;
- }
- if (!pci_has_flag(PCI_PROBE_ONLY)) {
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
- }
+ if (!bus) {
+ pci_free_resource_list(&resources);
+ return;
+ }
+
+ next_busno = bus->busn_res.end + 1;
+ /* Don't allow 8-bit bus number overflow inside the hose -
+ reserve some space for bridges. */
+ if (next_busno > 224) {
+ next_busno = 0;
+ need_domain_info = 1;
+ }
+
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
}
+ pci_bus_add_devices(bus);
}
#ifdef CONFIG_OF
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 613ca1e..3dfe2d3 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -342,6 +342,7 @@ static int __init pcibios_init(void)
{
resource_size_t io_offset, mem_offset;
LIST_HEAD(resources);
+ struct pci_bus *bus;
ioport_resource.start = 0xA0000000;
ioport_resource.end = 0xDFFFFFFF;
@@ -371,11 +372,14 @@ static int __init pcibios_init(void)
pci_add_resource_offset(&resources, &pci_ioport_resource, io_offset);
pci_add_resource_offset(&resources, &pci_iomem_resource, mem_offset);
- pci_scan_root_bus(NULL, 0, &pci_direct_ampci, NULL, &resources);
+ bus = pci_scan_root_bus(NULL, 0, &pci_direct_ampci, NULL, &resources);
+ if (!bus)
+ return 0;
pcibios_irq_init();
pcibios_fixup_irqs();
pcibios_resource_survey();
+ pci_bus_add_devices(bus);
return 0;
}
diff --git a/arch/nios2/include/asm/thread_info.h b/arch/nios2/include/asm/thread_info.h
index 1f26657..a16e55c 100644
--- a/arch/nios2/include/asm/thread_info.h
+++ b/arch/nios2/include/asm/thread_info.h
@@ -47,7 +47,6 @@ struct thread_info {
0-0x7FFFFFFF for user-thread
0-0xFFFFFFFF for kernel-thread
*/
- struct restart_block restart_block;
struct pt_regs *regs;
};
@@ -64,9 +63,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 37613119..e0bb972 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -1,6 +1,5 @@
include include/uapi/asm-generic/Kbuild.asm
header-y += elf.h
-header-y += ucontext.h
generic-y += ucontext.h
diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h
index 71a3305..eff00e67 100644
--- a/arch/nios2/include/uapi/asm/ptrace.h
+++ b/arch/nios2/include/uapi/asm/ptrace.h
@@ -60,12 +60,17 @@
#define PTR_IPENDING 37
#define PTR_CPUID 38
#define PTR_CTL6 39
-#define PTR_CTL7 40
+#define PTR_EXCEPTION 40
#define PTR_PTEADDR 41
#define PTR_TLBACC 42
#define PTR_TLBMISC 43
+#define PTR_ECCINJ 44
+#define PTR_BADADDR 45
+#define PTR_CONFIG 46
+#define PTR_MPUBASE 47
+#define PTR_MPUACC 48
-#define NUM_PTRACE_REG (PTR_TLBMISC + 1)
+#define NUM_PTRACE_REG (PTR_MPUACC + 1)
/* User structures for general purpose registers. */
struct user_pt_regs {
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
index 7729bd3..27b006c 100644
--- a/arch/nios2/kernel/entry.S
+++ b/arch/nios2/kernel/entry.S
@@ -161,7 +161,7 @@ ENTRY(inthandler)
***********************************************************************
*/
ENTRY(handle_trap)
- ldw r24, -4(ea) /* instruction that caused the exception */
+ ldwio r24, -4(ea) /* instruction that caused the exception */
srli r24, r24, 4
andi r24, r24, 0x7c
movia r9,trap_table
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index 0e075b5..2f8c74f 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -94,7 +94,6 @@ void show_regs(struct pt_regs *regs)
void flush_thread(void)
{
- set_fs(USER_DS);
}
int copy_thread(unsigned long clone_flags,
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index dda41e4..20662b0 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -43,7 +43,7 @@ static inline int rt_restore_ucontext(struct pt_regs *regs,
int err;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
err = __get_user(temp, &uc->uc_mcontext.version);
if (temp != MCONTEXT_VERSION)
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 2ae482b4..7966429 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -23,9 +23,6 @@ static void __flush_dcache(unsigned long start, unsigned long end)
end += (cpuinfo.dcache_line_size - 1);
end &= ~(cpuinfo.dcache_line_size - 1);
- if (end > start + cpuinfo.dcache_size)
- end = start + cpuinfo.dcache_size;
-
for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
__asm__ __volatile__ (" flushda 0(%0)\n"
: /* Outputs */
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 0d231ad..0c9b6af 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -126,7 +126,6 @@ good_area:
break;
}
-survive:
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
@@ -220,11 +219,6 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(tsk)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 386af25..7095dfe 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -197,7 +197,6 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
unsigned long sr = mfspr(SPR_SR) & ~SPR_SR_SM;
- set_fs(USER_DS);
memset(regs, 0, sizeof(struct pt_regs));
regs->pc = pc;
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 8014727..c365469 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -103,6 +103,11 @@ config ARCH_MAY_HAVE_PC_FDC
depends on BROKEN
default y
+config PGTABLE_LEVELS
+ int
+ default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index f213f5b..1ba2936 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (likely(pgd != NULL)) {
memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
* with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,13 +45,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
pgd -= PTRS_PER_PGD;
#endif
free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
/* Three Level Page Table Support for pmd's */
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
-#ifdef CONFIG_64BIT
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
- /* This is the permanent pmd attached to the pgd;
- * cannot free it */
+ /*
+ * This is the permanent pmd attached to the pgd;
+ * cannot free it.
+ * Increment the counter to compensate for the decrement
+ * done by generic mm code.
+ */
+ mm_inc_nr_pmds(mm);
return;
-#endif
free_pages((unsigned long)pmd, PMD_ORDER);
}
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 15207b9..0a18375 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -68,13 +68,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
-#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-#define PT_NLEVELS 3
+#if CONFIG_PGTABLE_LEVELS == 3
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PMD_ORDER 1 /* Number of pages per pmd */
#define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */
#else
-#define PT_NLEVELS 2
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PGD_ALLOC_ORDER PGD_ORDER
#endif
@@ -93,7 +91,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
#define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
#define __PAGETABLE_PMD_FOLDED
@@ -277,7 +275,7 @@ extern unsigned long *empty_zero_page;
#define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
/* The first entry of the permanent pmd is not there if it contains
* the gateway marker */
#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
@@ -287,7 +285,7 @@ extern unsigned long *empty_zero_page;
#define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
/* This is the entry pointing to the permanent pmd
* attached to the pgd; cannot clear it */
@@ -299,7 +297,7 @@ static inline void pmd_clear(pmd_t *pmd) {
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
#define pgd_page(pgd) virt_to_page((void *)pgd_page_vaddr(pgd))
@@ -309,7 +307,7 @@ static inline void pmd_clear(pmd_t *pmd) {
#define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID))
#define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pgd; cannot
* free it */
@@ -393,7 +391,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* Find an entry in the second-level page table.. */
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_offset(dir,address) \
((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
#else
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 2ab16bb..7581961 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -398,7 +398,7 @@
* can address up to 1TB
*/
.macro L2_ptep pmd,pte,index,va,fault
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
@@ -436,7 +436,7 @@
* all ILP32 processes and all the kernel for machines with
* under 4GB of memory) */
.macro L3_ptep pgd,pte,index,va,fault
-#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
+#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
copy %r0,\pte
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index d4dc588..e7d6452 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -74,7 +74,7 @@ $bss_loop:
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
/* Set pmd in pgd */
load32 PA(pmd0),%r5
shrd %r5,PxD_VALUE_SHIFT,%r3
@@ -97,7 +97,7 @@ $bss_loop:
stw %r3,0(%r4)
ldo (PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
addib,> -1,%r1,1b
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 5a8997d..8eefb12 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -55,8 +55,8 @@
#define ENTRY_COMP(_name_) .word sys_##_name_
#endif
- ENTRY_SAME(restart_syscall) /* 0 */
- ENTRY_SAME(exit)
+90: ENTRY_SAME(restart_syscall) /* 0 */
+91: ENTRY_SAME(exit)
ENTRY_SAME(fork_wrapper)
ENTRY_SAME(read)
ENTRY_SAME(write)
@@ -439,7 +439,10 @@
ENTRY_SAME(bpf)
ENTRY_COMP(execveat)
- /* Nothing yet */
+
+.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
+.error "size of syscall table does not fit value of __NR_Linux_syscalls"
+.endif
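
For readers decoding the new build-time assertion: labels 90 and 91 bracket the first two table entries, so (91b - 90b) is the size of one entry and (. - 90b) is the size of the whole table. The .ifne condition is therefore table_size - __NR_Linux_syscalls * entry_size, and the build fails whenever the entry count drifts out of sync with __NR_Linux_syscalls.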
#undef ENTRY_SAME
#undef ENTRY_DIFF
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 15dbe81..c229427 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -34,7 +34,7 @@
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
* with the first pmd adjacent to the pgd and below it. gcc doesn't actually
* guarantee that global objects will be laid out in memory in the same order
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 22b0940..e99014a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -88,7 +88,7 @@ config PPC
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select BINFMT_ELF
- select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ select ARCH_HAS_ELF_RANDOMIZE
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
@@ -297,6 +297,12 @@ config ZONE_DMA32
bool
default y if PPC64
+config PGTABLE_LEVELS
+ int
+ default 2 if !PPC64
+ default 3 if PPC_64K_PAGES
+ default 4
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index 2bf8e93..4c8ad59 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
static inline int cpu_nr_cores(void)
{
- return NR_CPUS >> threads_shift;
+ return nr_cpu_ids >> threads_shift;
}
static inline cpumask_t cpu_online_cores_map(void)
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 57d289a..ee46ffe 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -128,10 +128,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
(0x7ff >> (PAGE_SHIFT - 12)) : \
(0x3ffff >> (PAGE_SHIFT - 12)))
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
-
#ifdef CONFIG_SPU_BASE
/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
#define NT_SPU 1
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 942c7b1..9930904 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -106,10 +106,6 @@ struct kvmppc_vcpu_book3s {
spinlock_t mmu_lock;
};
-#define CONTEXT_HOST 0
-#define CONTEXT_GUEST 1
-#define CONTEXT_GUEST_END 2
-
#define VSID_REAL 0x07ffffffffc00000ULL
#define VSID_BAT 0x07ffffffffb00000ULL
#define VSID_64K 0x0800000000000000ULL
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 03cd858..4cbe23a 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -153,6 +153,7 @@
#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
#define PPC_INST_MFTMR 0x7c0002dc
#define PPC_INST_MSGSND 0x7c00019c
+#define PPC_INST_MSGCLR 0x7c0001dc
#define PPC_INST_MSGSNDP 0x7c00011c
#define PPC_INST_MTTMR 0x7c0003dc
#define PPC_INST_NOP 0x60000000
@@ -309,6 +310,8 @@
___PPC_RB(b) | __PPC_EH(eh))
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
___PPC_RB(b))
+#define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \
+ ___PPC_RB(b))
#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
___PPC_RB(b))
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1c874fb..af56b5c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -608,13 +608,16 @@
#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
+#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
#define SRR1_WAKESYSERR 0x00300000 /* System error */
#define SRR1_WAKEEE 0x00200000 /* External interrupt */
#define SRR1_WAKEMT 0x00280000 /* mtctrl */
#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
+#define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */
#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
#define SRR1_WAKERESET 0x00100000 /* System reset */
+#define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */
#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained,
* may not be recoverable */
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index 6e909f3..37d2da6 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -478,7 +478,7 @@ extern unsigned long smu_cmdbuf_abs;
/*
- * Kenrel asynchronous i2c interface
+ * Kernel asynchronous i2c interface
*/
#define SMU_I2C_READ_MAX 0x1d
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f337666..f830468 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
+ { /* Power8NVL */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x004c0000,
+ .cpu_name = "POWER8NVL (raw)",
+ .cpu_features = CPU_FTRS_POWER8,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power8",
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .flush_tlb = __flush_tlb_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
+ .platform = "power8",
+ },
{ /* Power8 DD1: Does not support doorbell IPIs */
.pvr_mask = 0xffffff00,
.pvr_value = 0x004d0100,
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index f421781..2128f3a 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -17,6 +17,7 @@
#include <asm/dbell.h>
#include <asm/irq_regs.h>
+#include <asm/kvm_ppc.h>
#ifdef CONFIG_SMP
void doorbell_setup_this_cpu(void)
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)
may_hard_irq_enable();
+ kvmppc_set_host_ipi(smp_processor_id(), 0);
__this_cpu_inc(irq_stat.doorbell_irqs);
smp_ipi_demux();
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c2df815..9519e6b 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1408,7 +1408,7 @@ machine_check_handle_early:
bne 9f /* continue in V mode if we are. */
5:
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
* We are coming from kernel context. Check if we are coming from
* guest. if yes, then we can continue. We will fall through
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index b8e15c6..308c5e1 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -721,7 +721,7 @@ void __init early_init_devtree(void *params)
*/
of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
if (boot_cpuid < 0) {
- printk("Failed to indentify boot CPU !\n");
+ printk("Failed to identify boot CPU !\n");
BUG();
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de4018a..de74756 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
spin_lock(&vcpu->arch.vpa_update_lock);
lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
if (lppaca)
- yield_count = lppaca->yield_count;
+ yield_count = be32_to_cpu(lppaca->yield_count);
spin_unlock(&vcpu->arch.vpa_update_lock);
return yield_count;
}
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
bool preserve_top32)
{
+ struct kvm *kvm = vcpu->kvm;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 mask;
+ mutex_lock(&kvm->lock);
spin_lock(&vc->lock);
/*
* If ILE (interrupt little-endian) has changed, update the
* MSR_LE bit in the intr_msr for each vcpu in this vcore.
*/
if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
- struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *vcpu;
int i;
- mutex_lock(&kvm->lock);
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.vcore != vc)
continue;
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
else
vcpu->arch.intr_msr &= ~MSR_LE;
}
- mutex_unlock(&kvm->lock);
}
/*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
mask &= 0xFFFFFFFF;
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
+ mutex_unlock(&kvm->lock);
}
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
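
The reordering above takes the sleeping kvm->lock unconditionally before the vcore spinlock, instead of acquiring it inside the spin-locked region where sleeping is not allowed. A userspace analogue of the resulting lock ordering (a sketch, not the kernel locking code):

#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* plays kvm->lock */
static pthread_spinlock_t inner;                          /* plays vc->lock */
static unsigned long lpcr;

/* Always take the sleeping lock first, then the spinlock; never
 * block on the mutex while the spinlock is already held. */
static void set_lpcr(unsigned long new_lpcr, unsigned long mask)
{
        pthread_mutex_lock(&outer);
        pthread_spin_lock(&inner);
        lpcr = (lpcr & ~mask) | (new_lpcr & mask);
        pthread_spin_unlock(&inner);
        pthread_mutex_unlock(&outer);
}

int main(void)
{
        pthread_spin_init(&inner, PTHREAD_PROCESS_PRIVATE);
        set_lpcr(0x4, 0xFFFFFFFF);
        return 0;
}
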
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bb94e6f..6cbf163 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Save HEIR (HV emulation assist reg) in emul_inst
if this is an HEI (HV emulation interrupt, e40) */
li r3,KVM_INST_FETCH_FAILED
+ stw r3,VCPU_LAST_INST(r9)
cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
bne 11f
mfspr r3,SPRN_HEIR
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 39b3a8f..6249cdc 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -34,7 +34,7 @@
#include <asm/kvm_para.h>
#include <asm/kvm_host.h>
#include <asm/kvm_ppc.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
#define MAX_CPU 32
#define MAX_SRC 256
@@ -289,11 +289,6 @@ static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
clear_bit(n_IRQ, q->queue);
}
-static inline int IRQ_testbit(struct irq_queue *q, int n_IRQ)
-{
- return test_bit(n_IRQ, q->queue);
-}
-
static void IRQ_check(struct openpic *opp, struct irq_queue *q)
{
int irq = -1;
@@ -1374,8 +1369,9 @@ static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
return -ENXIO;
}
-static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
- int len, void *ptr)
+static int kvm_mpic_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr, int len, void *ptr)
{
struct openpic *opp = container_of(this, struct openpic, mmio);
int ret;
@@ -1415,8 +1411,9 @@ static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
return ret;
}
-static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
- int len, const void *ptr)
+static int kvm_mpic_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr, int len, const void *ptr)
{
struct openpic *opp = container_of(this, struct openpic, mmio);
int ret;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 27c0fac..24bfe40 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -807,7 +807,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
idx = srcu_read_lock(&vcpu->kvm->srcu);
- ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+ ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
bytes, &run->mmio.data);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -880,7 +880,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
idx = srcu_read_lock(&vcpu->kvm->srcu);
- ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+ ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
bytes, &run->mmio.data);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
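
Both files see the same mechanical change: the io-bus accessors now take the initiating vcpu instead of the struct kvm, so device callbacks such as kvm_mpic_read() receive it directly. A hedged sketch of that callback shape, with illustrative names rather than the kvm/iodev.h declarations:

#include <stdio.h>

struct vcpu { int id; };
struct io_device;

/* Device ops now receive the initiating vcpu, not just the device. */
struct io_device_ops {
        int (*read)(struct vcpu *vcpu, struct io_device *dev,
                    unsigned long addr, int len, void *ptr);
};

struct io_device { const struct io_device_ops *ops; };

static int bus_read(struct vcpu *vcpu, struct io_device *dev,
                    unsigned long addr, int len, void *ptr)
{
        /* the bus just threads the vcpu through to the device */
        return dev->ops->read(vcpu, dev, addr, len, ptr);
}

static int null_read(struct vcpu *vcpu, struct io_device *dev,
                     unsigned long addr, int len, void *ptr)
{
        (void)dev; (void)ptr;
        printf("vcpu %d read %d bytes at %#lx\n", vcpu->id, len, addr);
        return 0;
}

static const struct io_device_ops null_ops = { .read = null_read };

int main(void)
{
        struct vcpu v = { .id = 0 };
        struct io_device dev = { .ops = &null_ops };
        char buf[4];

        return bus_read(&v, &dev, 0x1000, sizeof(buf), buf);
}
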
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index cb8bdbe..0f0502e 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -53,21 +53,20 @@ static inline int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
- unsigned long rnd = 0;
+ unsigned long rnd;
+
+ /* 8MB for 32bit, 1GB for 64bit */
+ if (is_32bit_task())
+ rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT));
+ else
+ rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT));
- if (current->flags & PF_RANDOMIZE) {
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
- else
- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
- }
return rnd << PAGE_SHIFT;
}
-static inline unsigned long mmap_base(void)
+static inline unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
@@ -76,7 +75,7 @@ static inline unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+ return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
/*
@@ -85,6 +84,11 @@ static inline unsigned long mmap_base(void)
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
/*
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
@@ -93,7 +97,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
mm->mmap_base = TASK_UNMAPPED_BASE;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
+ mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
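
The refactor splits the random offset out into arch_mmap_rnd() and passes it into mmap_base(), keeping the PF_RANDOMIZE check in one place. A worked userspace restatement of the arithmetic, assuming 4K pages and with rand() standing in for get_random_int():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12 /* 4K pages assumed */

/* Pick a page-granular random factor capped at 8MB (32-bit) or
 * 1GB (64-bit), then scale it to bytes. */
static unsigned long mmap_rnd(int is_32bit)
{
        unsigned long rnd;

        if (is_32bit)
                rnd = (unsigned long)rand() % (1UL << (23 - PAGE_SHIFT));
        else
                rnd = (unsigned long)rand() % (1UL << (30 - PAGE_SHIFT));
        return rnd << PAGE_SHIFT;
}

int main(void)
{
        printf("64-bit offset: %#lx (max %#lx)\n",
               mmap_rnd(0), (1UL << 30) - (1UL << PAGE_SHIFT));
        return 0;
}
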
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 7c4f669..7fd60dc 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -124,7 +124,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
-static void power_pmu_flush_branch_stack(void) {}
+static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */
@@ -350,6 +350,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
cpuhw->bhrb_context = event->ctx;
}
cpuhw->bhrb_users++;
+ perf_sched_cb_inc(event->ctx->pmu);
}
static void power_pmu_bhrb_disable(struct perf_event *event)
@@ -361,6 +362,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
cpuhw->bhrb_users--;
WARN_ON_ONCE(cpuhw->bhrb_users < 0);
+ perf_sched_cb_dec(event->ctx->pmu);
if (!cpuhw->disabled && !cpuhw->bhrb_users) {
/* BHRB cannot be turned off when other
@@ -375,9 +377,12 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
/* Called from ctxsw to prevent one process's branch entries to
* mingle with the other process's entries during context switch.
*/
-static void power_pmu_flush_branch_stack(void)
+static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
- if (ppmu->bhrb_nr)
+ if (!ppmu->bhrb_nr)
+ return;
+
+ if (sched_in)
power_pmu_bhrb_reset();
}
/* Calculate the to address for a branch */
@@ -1901,7 +1906,7 @@ static struct pmu power_pmu = {
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
.event_idx = power_pmu_event_idx,
- .flush_branch_stack = power_pmu_flush_branch_stack,
+ .sched_task = power_pmu_sched_task,
};
/*
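
power_pmu_sched_task() replaces the unconditional flush hook: perf_sched_cb_inc()/dec() bracket the lifetime of BHRB users so the callback only fires while someone needs it, and the buffer is reset only on sched-in. A minimal refcounted-callback analogue:

#include <stdio.h>
#include <stdbool.h>

static int sched_cb_users;

static void sched_cb_inc(void) { sched_cb_users++; }
static void sched_cb_dec(void) { sched_cb_users--; }

static void bhrb_reset(void) { printf("BHRB cleared\n"); }

/* Called on every context switch; does work only on sched-in and
 * only while at least one branch-history user is registered. */
static void sched_task(bool sched_in)
{
        if (!sched_cb_users)
                return;
        if (sched_in)
                bhrb_reset();
}

int main(void)
{
        sched_cb_inc();        /* event enabled */
        sched_task(true);      /* sched-in: reset */
        sched_task(false);     /* sched-out: nothing */
        sched_cb_dec();        /* event disabled */
        return 0;
}
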
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 9445a82..abeb9ec 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1126,7 +1126,7 @@ static int h_24x7_event_init(struct perf_event *event)
/* Physical domains & other lpars require extra capabilities */
if (!caps.collect_privileged && (is_physical_domain(domain) ||
(event_get_lpar(event) != event_get_lpar_max()))) {
- pr_devel("hv permisions disallow: is_physical_domain:%d, lpar=0x%llx\n",
+ pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
is_physical_domain(domain),
event_get_lpar(event));
return -EACCES;
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index 7a180f0..680232d 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -50,14 +50,14 @@ void p1022rdk_set_pixel_clock(unsigned int pixclock)
/* Map the global utilities registers. */
guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
if (!guts_np) {
- pr_err("p1022rdk: missing global utilties device node\n");
+ pr_err("p1022rdk: missing global utilities device node\n");
return;
}
guts = of_iomap(guts_np, 0);
of_node_put(guts_np);
if (!guts) {
- pr_err("p1022rdk: could not map global utilties device\n");
+ pr_err("p1022rdk: could not map global utilities device\n");
return;
}
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 0509bca..fcbe899 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -9,11 +9,11 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/jump_label.h>
#include <asm/ppc_asm.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/opal.h>
-#include <asm/jump_label.h>
.section ".text"
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index fc34025..38a4508 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -33,6 +33,8 @@
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>
+#include <asm/kvm_ppc.h>
+#include <asm/ppc-opcode.h>
#include "powernv.h"
@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
- unsigned long srr1;
+ unsigned long srr1, wmask;
u32 idle_states;
/* Standard hot unplug procedure */
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
generic_set_cpu_dead(cpu);
smp_wmb();
+ wmask = SRR1_WAKEMASK;
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ wmask = SRR1_WAKEMASK_P8;
+
idle_states = pnv_get_supported_cpuidle_states();
/* We don't want to take decrementer interrupts while we are offline,
* so clear LPCR:PECE1. We keep PECE2 enabled.
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
* having finished executing in a KVM guest, then srr1
* contains 0.
*/
- if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
+ if ((srr1 & wmask) == SRR1_WAKEEE) {
icp_native_flush_interrupt();
local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
smp_mb();
+ } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
+ unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+ asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
+ kvmppc_set_host_ipi(cpu, 0);
}
if (cpu_core_split_required())
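
On a hypervisor-doorbell wakeup the offlined CPU clears the pending message with PPC_MSGCLR before napping again, passing a PPC_DBELL_SERVER-typed operand. A sketch of building that operand; the position of the type field (shift 63-36) is an assumption restated from dbell.h, not part of this patch:

#include <stdint.h>
#include <stdio.h>

#define PPC_DBELL_SERVER  5
/* dbell.h places the message type in bits 36:39 of the 64-bit
 * operand, i.e. a shift of 63-36 = 27 (assumed here). */
#define PPC_DBELL_TYPE(x) (((uint64_t)(x) & 0xf) << (63 - 36))

int main(void)
{
        uint64_t msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
        printf("msgclr operand: 0x%016llx\n", (unsigned long long)msg);
        /* on real hardware: asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); */
        return 0;
}
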
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index ccd53f9..74b5b8e 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -7,12 +7,12 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/jump_label.h>
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
-#include <asm/jump_label.h>
.section ".text"
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index b5682fd..b7a67e3 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
-#include <linux/static_key.h>
+#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 90cf3dc..8f35d52 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -25,10 +25,10 @@
static struct kobject *mobility_kobj;
struct update_props_workarea {
- u32 phandle;
- u32 state;
- u64 reserved;
- u32 nprops;
+ __be32 phandle;
+ __be32 state;
+ __be64 reserved;
+ __be32 nprops;
} __packed;
#define NODE_ACTION_MASK 0xff000000
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
return rc;
}
-static int delete_dt_node(u32 phandle)
+static int delete_dt_node(__be32 phandle)
{
struct device_node *dn;
- dn = of_find_node_by_phandle(phandle);
+ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!dn)
return -ENOENT;
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
return 0;
}
-static int update_dt_node(u32 phandle, s32 scope)
+static int update_dt_node(__be32 phandle, s32 scope)
{
struct update_props_workarea *upwa;
struct device_node *dn;
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
char *prop_data;
char *rtas_buf;
int update_properties_token;
+ u32 nprops;
u32 vd;
update_properties_token = rtas_token("ibm,update-properties");
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
if (!rtas_buf)
return -ENOMEM;
- dn = of_find_node_by_phandle(phandle);
+ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!dn) {
kfree(rtas_buf);
return -ENOENT;
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
break;
prop_data = rtas_buf + sizeof(*upwa);
+ nprops = be32_to_cpu(upwa->nprops);
/* On the first call to ibm,update-properties for a node the
* first property value descriptor contains an empty
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
*/
if (*prop_data == 0) {
prop_data++;
- vd = *(u32 *)prop_data;
+ vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += vd + sizeof(vd);
- upwa->nprops--;
+ nprops--;
}
- for (i = 0; i < upwa->nprops; i++) {
+ for (i = 0; i < nprops; i++) {
char *prop_name;
prop_name = prop_data;
prop_data += strlen(prop_name) + 1;
- vd = *(u32 *)prop_data;
+ vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += sizeof(vd);
switch (vd) {
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
return 0;
}
-static int add_dt_node(u32 parent_phandle, u32 drc_index)
+static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
{
struct device_node *dn;
struct device_node *parent_dn;
int rc;
- parent_dn = of_find_node_by_phandle(parent_phandle);
+ parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
if (!parent_dn)
return -ENOENT;
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
int pseries_devicetree_update(s32 scope)
{
char *rtas_buf;
- u32 *data;
+ __be32 *data;
int update_nodes_token;
int rc;
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
if (rc && rc != 1)
break;
- data = (u32 *)rtas_buf + 4;
- while (*data & NODE_ACTION_MASK) {
+ data = (__be32 *)rtas_buf + 4;
+ while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
int i;
- u32 action = *data & NODE_ACTION_MASK;
- int node_count = *data & NODE_COUNT_MASK;
+ u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
+ u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
data++;
for (i = 0; i < node_count; i++) {
- u32 phandle = *data++;
- u32 drc_index;
+ __be32 phandle = *data++;
+ __be32 drc_index;
switch (action) {
case DELETE_DT_NODE:
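
The common thread in this file is that ibm,update-nodes and ibm,update-properties return big-endian words, which the old code read as host-endian. A small endian-safe restatement of the header decode, with ntohl() playing the role of be32_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h> /* ntohl: big-endian to host on any endianness */

#define NODE_ACTION_MASK 0xff000000u
#define NODE_COUNT_MASK  0x00ffffffu

/* RTAS fills the work area with big-endian words; convert on access
 * rather than assuming host byte order (the bug the patch fixes). */
int main(void)
{
        uint32_t raw_be = htonl(0x01000002); /* action 0x01, two nodes */
        uint32_t v = ntohl(raw_be);

        printf("action=%#x node_count=%u\n",
               v & NODE_ACTION_MASK, v & NODE_COUNT_MASK);
        return 0;
}
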
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index 647c3ec..2938934 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -4,6 +4,5 @@ obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_CRYPTO_HW) += crypto/
obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
obj-$(CONFIG_APPLDATA_BASE) += appldata/
-obj-$(CONFIG_MATHEMU) += math-emu/
obj-y += net/
obj-$(CONFIG_PCI) += pci/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 373cd5b..a5ced5c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -35,7 +35,7 @@ config GENERIC_BUG_RELATIVE_POINTERS
def_bool y
config ARCH_DMA_ADDR_T_64BIT
- def_bool 64BIT
+ def_bool y
config GENERIC_LOCKBREAK
def_bool y if SMP && PREEMPT
@@ -59,12 +59,13 @@ config PCI_QUIRKS
def_bool n
config ARCH_SUPPORTS_UPROBES
- def_bool 64BIT
+ def_bool y
config S390
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SG_CHAIN
select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -110,19 +111,19 @@ config S390
select GENERIC_TIME_VSYSCALL
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+ select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
- select HAVE_BPF_JIT if 64BIT && PACK_STACK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_BPF_JIT if PACK_STACK
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK
- select HAVE_DYNAMIC_FTRACE if 64BIT
- select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_FTRACE_MCOUNT_RECORD
- select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
- select HAVE_FUNCTION_TRACER if 64BIT
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
@@ -132,7 +133,8 @@ config S390
select HAVE_KERNEL_XZ
select HAVE_KPROBES
select HAVE_KRETPROBES
- select HAVE_KVM if 64BIT
+ select HAVE_KVM
+ select HAVE_LIVEPATCH
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MEMBLOCK_PHYS_MAP
@@ -141,7 +143,6 @@ config S390
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_UID16 if 32BIT
select HAVE_VIRT_CPU_ACCOUNTING
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
@@ -155,10 +156,17 @@ config S390
config SCHED_OMIT_FRAME_POINTER
def_bool y
+config PGTABLE_LEVELS
+ int
+ default 4 if 64BIT
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
+source "kernel/livepatch/Kconfig"
+
menu "Processor type and features"
config HAVE_MARCH_Z900_FEATURES
@@ -190,18 +198,11 @@ config HAVE_MARCH_Z13_FEATURES
choice
prompt "Processor type"
- default MARCH_G5
-
-config MARCH_G5
- bool "System/390 model G5 and G6"
- depends on !64BIT
- help
- Select this to build a 31 bit kernel that works
- on all ESA/390 and z/Architecture machines.
+ default MARCH_Z900
config MARCH_Z900
bool "IBM zSeries model z800 and z900"
- select HAVE_MARCH_Z900_FEATURES if 64BIT
+ select HAVE_MARCH_Z900_FEATURES
help
Select this to enable optimizations for model z800/z900 (2064 and
2066 series). This will enable some optimizations that are not
@@ -209,7 +210,7 @@ config MARCH_Z900
config MARCH_Z990
bool "IBM zSeries model z890 and z990"
- select HAVE_MARCH_Z990_FEATURES if 64BIT
+ select HAVE_MARCH_Z990_FEATURES
help
Select this to enable optimizations for model z890/z990 (2084 and
2086 series). The kernel will be slightly faster but will not work
@@ -217,7 +218,7 @@ config MARCH_Z990
config MARCH_Z9_109
bool "IBM System z9"
- select HAVE_MARCH_Z9_109_FEATURES if 64BIT
+ select HAVE_MARCH_Z9_109_FEATURES
help
Select this to enable optimizations for IBM System z9 (2094 and
2096 series). The kernel will be slightly faster but will not work
@@ -225,7 +226,7 @@ config MARCH_Z9_109
config MARCH_Z10
bool "IBM System z10"
- select HAVE_MARCH_Z10_FEATURES if 64BIT
+ select HAVE_MARCH_Z10_FEATURES
help
Select this to enable optimizations for IBM System z10 (2097 and
2098 series). The kernel will be slightly faster but will not work
@@ -233,7 +234,7 @@ config MARCH_Z10
config MARCH_Z196
bool "IBM zEnterprise 114 and 196"
- select HAVE_MARCH_Z196_FEATURES if 64BIT
+ select HAVE_MARCH_Z196_FEATURES
help
Select this to enable optimizations for IBM zEnterprise 114 and 196
(2818 and 2817 series). The kernel will be slightly faster but will
@@ -241,7 +242,7 @@ config MARCH_Z196
config MARCH_ZEC12
bool "IBM zBC12 and zEC12"
- select HAVE_MARCH_ZEC12_FEATURES if 64BIT
+ select HAVE_MARCH_ZEC12_FEATURES
help
Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
2827 series). The kernel will be slightly faster but will not work on
@@ -249,7 +250,7 @@ config MARCH_ZEC12
config MARCH_Z13
bool "IBM z13"
- select HAVE_MARCH_Z13_FEATURES if 64BIT
+ select HAVE_MARCH_Z13_FEATURES
help
Select this to enable optimizations for IBM z13 (2964 series).
The kernel will be slightly faster but will not work on older
@@ -257,9 +258,6 @@ config MARCH_Z13
endchoice
-config MARCH_G5_TUNE
- def_bool TUNE_G5 || MARCH_G5 && TUNE_DEFAULT
-
config MARCH_Z900_TUNE
def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT
@@ -298,9 +296,6 @@ config TUNE_DEFAULT
Tune the generated code for the target processor for which the kernel
will be compiled.
-config TUNE_G5
- bool "System/390 model G5 and G6"
-
config TUNE_Z900
bool "IBM zSeries model z800 and z900"
@@ -326,18 +321,10 @@ endchoice
config 64BIT
def_bool y
- prompt "64 bit kernel"
- help
- Select this option if you have an IBM z/Architecture machine
- and want to use the 64 bit addressing mode.
-
-config 32BIT
- def_bool y if !64BIT
config COMPAT
def_bool y
prompt "Kernel support for 31 bit emulation"
- depends on 64BIT
select COMPAT_BINFMT_ELF if BINFMT_ELF
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
@@ -376,8 +363,7 @@ config NR_CPUS
int "Maximum number of CPUs (2-512)"
range 2 512
depends on SMP
- default "32" if !64BIT
- default "64" if 64BIT
+ default "64"
help
This allows you to specify the maximum number of CPUs which this
kernel will support. The maximum supported value is 512 and the
@@ -418,15 +404,6 @@ config SCHED_TOPOLOGY
source kernel/Kconfig.preempt
-config MATHEMU
- def_bool y
- prompt "IEEE FPU emulation"
- depends on MARCH_G5
- help
- This option is required for IEEE compliant floating point arithmetic
- on older ESA/390 machines. Say Y unless you know your machine doesn't
- need this.
-
source kernel/Kconfig.hz
endmenu
@@ -437,7 +414,6 @@ config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
select SPARSEMEM_VMEMMAP
- select SPARSEMEM_STATIC if !64BIT
config ARCH_SPARSEMEM_DEFAULT
def_bool y
@@ -453,7 +429,6 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
config ARCH_ENABLE_SPLIT_PMD_PTLOCK
def_bool y
- depends on 64BIT
config FORCE_MAX_ZONEORDER
int
@@ -528,7 +503,6 @@ config QDIO
menuconfig PCI
bool "PCI support"
- depends on 64BIT
select HAVE_DMA_ATTRS
select PCI_MSI
help
@@ -598,7 +572,6 @@ config CHSC_SCH
config SCM_BUS
def_bool y
- depends on 64BIT
prompt "SCM bus driver"
help
Bus driver for Storage Class Memory.
@@ -620,7 +593,7 @@ menu "Dump support"
config CRASH_DUMP
bool "kernel crash dumps"
- depends on 64BIT && SMP
+ depends on SMP
select KEXEC
help
Generate crash dump after being started by kexec.
@@ -659,7 +632,7 @@ endmenu
menu "Power Management"
config ARCH_HIBERNATION_POSSIBLE
- def_bool y if 64BIT
+ def_bool y
source "kernel/power/Kconfig"
@@ -810,7 +783,6 @@ source "arch/s390/kvm/Kconfig"
config S390_GUEST
def_bool y
prompt "s390 support for virtio devices"
- depends on 64BIT
select TTY
select VIRTUALIZATION
select VIRTIO
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index acb6859..667b1bc 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -13,15 +13,6 @@
# Copyright (C) 1994 by Linus Torvalds
#
-ifndef CONFIG_64BIT
-LD_BFD := elf32-s390
-LDFLAGS := -m elf_s390
-KBUILD_CFLAGS += -m31
-KBUILD_AFLAGS += -m31
-UTS_MACHINE := s390
-STACK_SIZE := 8192
-CHECKFLAGS += -D__s390__ -msize-long
-else
LD_BFD := elf64-s390
LDFLAGS := -m elf64_s390
KBUILD_AFLAGS_MODULE += -fPIC
@@ -31,11 +22,9 @@ KBUILD_AFLAGS += -m64
UTS_MACHINE := s390x
STACK_SIZE := 16384
CHECKFLAGS += -D__s390__ -D__s390x__
-endif
export LD_BFD
-mflags-$(CONFIG_MARCH_G5) := -march=g5
mflags-$(CONFIG_MARCH_Z900) := -march=z900
mflags-$(CONFIG_MARCH_Z990) := -march=z990
mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
@@ -47,7 +36,6 @@ mflags-$(CONFIG_MARCH_Z13) := -march=z13
aflags-y += $(mflags-y)
cflags-y += $(mflags-y)
-cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5
cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990
cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
@@ -104,7 +92,7 @@ KBUILD_AFLAGS += $(aflags-y)
OBJCOPYFLAGS := -O binary
head-y := arch/s390/kernel/head.o
-head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o)
+head-y += arch/s390/kernel/head64.o
# See arch/s390/Kbuild for content of core part of the kernel
core-y += arch/s390/
@@ -129,9 +117,7 @@ zfcpdump:
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
vdso_install:
-ifeq ($(CONFIG_64BIT),y)
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
-endif
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
archclean:
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index f90d1fc..d478811 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -4,13 +4,11 @@
# create a compressed vmlinux image from the original vmlinux
#
-BITS := $(if $(CONFIG_64BIT),64,31)
-
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += misc.o piggy.o sizes.h head$(BITS).o
+targets += misc.o piggy.o sizes.h head.o
-KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
+KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
@@ -19,7 +17,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
GCOV_PROFILE := n
OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o)
-OBJECTS += $(obj)/head$(BITS).o $(obj)/misc.o $(obj)/piggy.o
+OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
@@ -34,8 +32,8 @@ quiet_cmd_sizes = GEN $@
$(obj)/sizes.h: vmlinux
$(call if_changed,sizes)
-AFLAGS_head$(BITS).o += -I$(obj)
-$(obj)/head$(BITS).o: $(obj)/sizes.h
+AFLAGS_head.o += -I$(obj)
+$(obj)/head.o: $(obj)/sizes.h
CFLAGS_misc.o += -I$(obj)
$(obj)/misc.o: $(obj)/sizes.h
diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head.S
index f86a4ee..f86a4ee 100644
--- a/arch/s390/boot/compressed/head64.S
+++ b/arch/s390/boot/compressed/head.S
diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S
deleted file mode 100644
index e8c9e18..0000000
--- a/arch/s390/boot/compressed/head31.S
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Startup glue code to uncompress the kernel
- *
- * Copyright IBM Corp. 2010
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-#include "sizes.h"
-
-__HEAD
-ENTRY(startup_continue)
- basr %r13,0 # get base
-.LPG1:
- # setup stack
- l %r15,.Lstack-.LPG1(%r13)
- ahi %r15,-96
- l %r1,.Ldecompress-.LPG1(%r13)
- basr %r14,%r1
- # setup registers for memory mover & branch to target
- lr %r4,%r2
- l %r2,.Loffset-.LPG1(%r13)
- la %r4,0(%r2,%r4)
- l %r3,.Lmvsize-.LPG1(%r13)
- lr %r5,%r3
- # move the memory mover someplace safe
- la %r1,0x200
- mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
- # decompress image is started at 0x11000
- lr %r6,%r2
- br %r1
-mover:
- mvcle %r2,%r4,0
- jo mover
- br %r6
-mover_end:
-
- .align 8
-.Lstack:
- .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
-.Ldecompress:
- .long decompress_kernel
-.Loffset:
- .long 0x11000
-.Lmvsize:
- .long SZ__bss_start
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index 8e1fb82..747735f 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -1,12 +1,7 @@
#include <asm-generic/vmlinux.lds.h>
-#ifdef CONFIG_64BIT
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
-#else
-OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
-OUTPUT_ARCH(s390:31-bit)
-#endif
ENTRY(startup)
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 6c5cc6d..ba3b2ae 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -369,14 +369,10 @@ static inline int crypt_s390_func_available(int func,
if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
return 0;
-
- if (facility_mask & CRYPT_S390_MSA3 &&
- (!test_facility(2) || !test_facility(76)))
+ if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
return 0;
- if (facility_mask & CRYPT_S390_MSA4 &&
- (!test_facility(2) || !test_facility(77)))
+ if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
return 0;
-
switch (func & CRYPT_S390_OP_MASK) {
case CRYPT_S390_KM:
ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
index d4c0d37..24c747a 100644
--- a/arch/s390/hypfs/hypfs_diag0c.c
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -19,13 +19,9 @@
static void diag0c(struct hypfs_diag0c_entry *entry)
{
asm volatile (
-#ifdef CONFIG_64BIT
" sam31\n"
" diag %0,%0,0x0c\n"
" sam64\n"
-#else
- " diag %0,%0,0x0c\n"
-#endif
: /* no output register */
: "a" (entry)
: "memory");
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 99824ff..df7d8cb 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
-#include <linux/aio.h>
+#include <linux/uio.h>
#include <asm/ebcdic.h>
#include "hypfs.h"
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index 32a7059..16887c5 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -9,28 +9,6 @@
#include <asm/io.h>
-#ifndef CONFIG_64BIT
-
-#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
-#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
-#define APPLDATA_GEN_EVENT_REC 0x02
-#define APPLDATA_START_CONFIG_REC 0x03
-
-/*
- * Parameter list for DIAGNOSE X'DC'
- */
-struct appldata_parameter_list {
- u16 diag; /* The DIAGNOSE code X'00DC' */
- u8 function; /* The function code for the DIAGNOSE */
- u8 parlist_length; /* Length of the parameter list */
- u32 product_id_addr; /* Address of the 16-byte product ID */
- u16 reserved;
- u16 buffer_length; /* Length of the application data buffer */
- u32 buffer_addr; /* Address of the application data buffer */
-} __attribute__ ((packed));
-
-#else /* CONFIG_64BIT */
-
#define APPLDATA_START_INTERVAL_REC 0x80
#define APPLDATA_STOP_REC 0x81
#define APPLDATA_GEN_EVENT_REC 0x82
@@ -51,8 +29,6 @@ struct appldata_parameter_list {
u64 buffer_addr;
} __attribute__ ((packed));
-#endif /* CONFIG_64BIT */
-
struct appldata_product_id {
char prod_nr[7]; /* product number */
u16 prod_fn; /* product function */
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fa934fe..adbe380 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -160,8 +160,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_INIT(i) { (i) }
-#ifdef CONFIG_64BIT
-
#define __ATOMIC64_NO_BARRIER "\n"
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -274,99 +272,6 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
#undef __ATOMIC64_LOOP
-#else /* CONFIG_64BIT */
-
-typedef struct {
- long long counter;
-} atomic64_t;
-
-static inline long long atomic64_read(const atomic64_t *v)
-{
- register_pair rp;
-
- asm volatile(
- " lm %0,%N0,%1"
- : "=&d" (rp) : "Q" (v->counter) );
- return rp.pair;
-}
-
-static inline void atomic64_set(atomic64_t *v, long long i)
-{
- register_pair rp = {.pair = i};
-
- asm volatile(
- " stm %1,%N1,%0"
- : "=Q" (v->counter) : "d" (rp) );
-}
-
-static inline long long atomic64_xchg(atomic64_t *v, long long new)
-{
- register_pair rp_new = {.pair = new};
- register_pair rp_old;
-
- asm volatile(
- " lm %0,%N0,%1\n"
- "0: cds %0,%2,%1\n"
- " jl 0b\n"
- : "=&d" (rp_old), "+Q" (v->counter)
- : "d" (rp_new)
- : "cc");
- return rp_old.pair;
-}
-
-static inline long long atomic64_cmpxchg(atomic64_t *v,
- long long old, long long new)
-{
- register_pair rp_old = {.pair = old};
- register_pair rp_new = {.pair = new};
-
- asm volatile(
- " cds %0,%2,%1"
- : "+&d" (rp_old), "+Q" (v->counter)
- : "d" (rp_new)
- : "cc");
- return rp_old.pair;
-}
-
-
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old + i;
- } while (atomic64_cmpxchg(v, old, new) != old);
- return new;
-}
-
-static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old | mask;
- } while (atomic64_cmpxchg(v, old, new) != old);
-}
-
-static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old & mask;
- } while (atomic64_cmpxchg(v, old, new) != old);
-}
-
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
- atomic64_add_return(i, v);
-}
-
-#endif /* CONFIG_64BIT */
-
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
long long c, old;
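
The deleted 31-bit block emulated 64-bit atomics with load + compare-double-and-swap retry loops; the surviving helpers rely on the same shape. The pattern in portable C11 atomics, as a sketch rather than the kernel implementation:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic long long v;

/* Retry loop: recompute from the freshly observed old value until
 * the compare-and-swap lands -- the pattern cds/cs implemented. */
static long long add_return(long long i)
{
        long long old = atomic_load(&v), new_;

        do {
                new_ = old + i;
        } while (!atomic_compare_exchange_weak(&v, &old, new_));
        return new_;
}

int main(void)
{
        printf("%lld\n", add_return(5));
        return 0;
}
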
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 5205424..9b68e98 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -51,32 +51,6 @@
#define __BITOPS_NO_BARRIER "\n"
-#ifndef CONFIG_64BIT
-
-#define __BITOPS_OR "or"
-#define __BITOPS_AND "nr"
-#define __BITOPS_XOR "xr"
-#define __BITOPS_BARRIER "\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
-({ \
- unsigned long __old, __new; \
- \
- typecheck(unsigned long *, (__addr)); \
- asm volatile( \
- " l %0,%2\n" \
- "0: lr %1,%0\n" \
- __op_string " %1,%3\n" \
- " cs %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
- : "d" (__val) \
- : "cc", "memory"); \
- __old; \
-})
-
-#else /* CONFIG_64BIT */
-
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __BITOPS_OR "laog"
@@ -125,8 +99,6 @@
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-#endif /* CONFIG_64BIT */
-
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
static inline unsigned long *
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 6259895..4eadec4 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -80,15 +80,10 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
({ \
__typeof__(p1) __p1 = (p1); \
__typeof__(p2) __p2 = (p2); \
- int __ret; \
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
- if (sizeof(long) == 4) \
- __ret = __cmpxchg_double_4(__p1, __p2, o1, o2, n1, n2); \
- else \
- __ret = __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
- __ret; \
+ __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
})
#define system_has_cmpxchg_double() 1
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index b91e960..221b454 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -22,15 +22,7 @@ typedef unsigned long long __nocast cputime64_t;
static inline unsigned long __div(unsigned long long n, unsigned long base)
{
-#ifndef CONFIG_64BIT
- register_pair rp;
-
- rp.pair = n >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
- return rp.subreg.odd;
-#else /* CONFIG_64BIT */
return n / base;
-#endif /* CONFIG_64BIT */
}
#define cputime_one_jiffy jiffies_to_cputime(1)
@@ -101,17 +93,8 @@ static inline void cputime_to_timespec(const cputime_t cputime,
struct timespec *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef CONFIG_64BIT
- register_pair rp;
-
- rp.pair = __cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2));
- value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC;
- value->tv_sec = rp.subreg.odd;
-#else
value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
value->tv_sec = __cputime / CPUTIME_PER_SEC;
-#endif
}
/*
@@ -129,17 +112,8 @@ static inline void cputime_to_timeval(const cputime_t cputime,
struct timeval *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef CONFIG_64BIT
- register_pair rp;
-
- rp.pair = __cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2));
- value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC;
- value->tv_sec = rp.subreg.odd;
-#else
value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
value->tv_sec = __cputime / CPUTIME_PER_SEC;
-#endif
}
/*
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 31ab9f3..cfad7fca 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -9,20 +9,12 @@
#include <linux/bug.h>
-#ifdef CONFIG_64BIT
-# define __CTL_LOAD "lctlg"
-# define __CTL_STORE "stctg"
-#else
-# define __CTL_LOAD "lctl"
-# define __CTL_STORE "stctl"
-#endif
-
#define __ctl_load(array, low, high) { \
typedef struct { char _[sizeof(array)]; } addrtype; \
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
- __CTL_LOAD " %1,%2,%0\n" \
+ " lctlg %1,%2,%0\n" \
: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
}
@@ -31,7 +23,7 @@
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
- __CTL_STORE " %1,%2,%0\n" \
+ " stctg %1,%2,%0\n" \
: "=Q" (*(addrtype *)(&array)) \
: "i" (low), "i" (high)); \
}
@@ -60,9 +52,7 @@ void smp_ctl_clear_bit(int cr, int bit);
union ctlreg0 {
unsigned long val;
struct {
-#ifdef CONFIG_64BIT
unsigned long : 32;
-#endif
unsigned long : 3;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c9df40b..3ad48f2 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -107,11 +107,7 @@
/*
* These are used to set parameters in the core dumps.
*/
-#ifndef CONFIG_64BIT
-#define ELF_CLASS ELFCLASS32
-#else /* CONFIG_64BIT */
#define ELF_CLASS ELFCLASS64
-#endif /* CONFIG_64BIT */
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
@@ -161,10 +157,11 @@ extern unsigned int vdso_enabled;
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-extern unsigned long randomize_et_dyn(void);
-#define ELF_ET_DYN_BASE randomize_et_dyn()
+ that it will "exec", and that there is sufficient room for the brk. 64-bit
+ tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_32bit_task() ? \
+ (STACK_TOP / 3 * 2) : \
+ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
@@ -211,7 +208,7 @@ do { \
extern unsigned long mmap_rnd_mask;
-#define STACK_RND_MASK (mmap_rnd_mask)
+#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
#define ARCH_DLINFO \
do { \
@@ -225,9 +222,6 @@ struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
int arch_setup_additional_pages(struct linux_binprm *, int);
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
#endif
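
The new ELF_ET_DYN_BASE is a plain formula: two thirds of STACK_TOP, additionally rounded down to a 4GB boundary for 64-bit tasks. A worked example with an illustrative (assumed) STACK_TOP:

#include <stdio.h>

int main(void)
{
        /* illustrative 64-bit STACK_TOP of 4TB (assumed value) */
        unsigned long stack_top = 1UL << 42;
        unsigned long base31 = stack_top / 3 * 2;
        unsigned long base64 = (stack_top / 3 * 2) & ~((1UL << 32) - 1);

        printf("31-bit base: %#lx\n64-bit base: %#lx (4GB aligned)\n",
               base31, base64);
        return 0;
}
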
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index ea5a6e4..a7b2d75 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -19,11 +19,7 @@
#include <asm/cio.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_64BIT
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
-#else
-#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
-#endif
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
/*
@@ -32,11 +28,7 @@
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
-#ifdef CONFIG_64BIT
return ((__pa(vaddr) + length - 1) >> 31) != 0;
-#else
- return 0;
-#endif
}
@@ -77,7 +69,6 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
static inline int
set_normalized_cda(struct ccw1 * ccw, void *vaddr)
{
-#ifdef CONFIG_64BIT
unsigned int nridaws;
unsigned long *idal;
@@ -93,7 +84,6 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
ccw->flags |= CCW_FLAG_IDA;
vaddr = idal;
}
-#endif
ccw->cda = (__u32)(unsigned long) vaddr;
return 0;
}
@@ -104,12 +94,10 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
static inline void
clear_normalized_cda(struct ccw1 * ccw)
{
-#ifdef CONFIG_64BIT
if (ccw->flags & CCW_FLAG_IDA) {
kfree((void *)(unsigned long) ccw->cda);
ccw->flags &= ~CCW_FLAG_IDA;
}
-#endif
ccw->cda = 0;
}
@@ -181,12 +169,8 @@ idal_buffer_free(struct idal_buffer *ib)
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
-#ifdef CONFIG_64BIT
return ib->size > (4096ul << ib->page_order) ||
idal_is_needed(ib->data[0], ib->size);
-#else
- return ib->size > (4096ul << ib->page_order);
-#endif
}
/*
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 58642fd..69972b7 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -1,19 +1,13 @@
#ifndef _ASM_S390_JUMP_LABEL_H
#define _ASM_S390_JUMP_LABEL_H
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 6
#define JUMP_LABEL_NOP_OFFSET 2
-#ifdef CONFIG_64BIT
-#define ASM_PTR ".quad"
-#define ASM_ALIGN ".balign 8"
-#else
-#define ASM_PTR ".long"
-#define ASM_ALIGN ".balign 4"
-#endif
-
/*
* We use a brcl 0,2 instruction for jump labels at compile time so it
* can be easily distinguished from a hotpatch generated instruction.
@@ -22,8 +16,8 @@ static __always_inline bool arch_static_branch(struct static_key *key)
{
asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
".pushsection __jump_table, \"aw\"\n"
- ASM_ALIGN "\n"
- ASM_PTR " 0b, %l[label], %0\n"
+ ".balign 8\n"
+ ".quad 0b, %l[label], %0\n"
".popsection\n"
: : "X" (key) : : label);
return false;
@@ -39,4 +33,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
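
With the 31-bit build gone, every __jump_table entry is unconditionally three 8-byte words, which is exactly what the hard-coded ".balign 8" / ".quad" directives emit. A sketch of the record those directives produce:

#include <stdio.h>
#include <stdint.h>

/* Each ".quad 0b, %l[label], %0" emits one of these into __jump_table:
 * the nop's address, the branch target, and the key (always 8 bytes
 * each now that the 31-bit build is gone). */
struct jump_entry {
        uint64_t code;   /* address of the brcl 0,2 nop */
        uint64_t target; /* label to branch to when enabled */
        uint64_t key;    /* the static_key this site belongs to */
};

int main(void)
{
        printf("entry size: %zu bytes\n", sizeof(struct jump_entry));
        return 0;
}
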
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index f407bbf..d01fc58 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -172,7 +172,9 @@ struct kvm_s390_sie_block {
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[20]; /* 0x01a4 */
__u64 cbrlo; /* 0x01b8 */
- __u8 reserved1c0[30]; /* 0x01c0 */
+ __u8 reserved1c0[8]; /* 0x01c0 */
+ __u32 ecd; /* 0x01c8 */
+ __u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
@@ -183,11 +185,17 @@ struct kvm_s390_itdb {
__u8 data[256];
} __packed;
+struct kvm_s390_vregs {
+ __vector128 vrs[32];
+ __u8 reserved200[512]; /* for future vector expansion */
+} __packed;
+
struct sie_page {
struct kvm_s390_sie_block sie_block;
__u8 reserved200[1024]; /* 0x0200 */
struct kvm_s390_itdb itdb; /* 0x0600 */
- __u8 reserved700[2304]; /* 0x0700 */
+ __u8 reserved700[1280]; /* 0x0700 */
+ struct kvm_s390_vregs vregs; /* 0x0c00 */
} __packed;
struct kvm_vcpu_stat {
@@ -238,6 +246,7 @@ struct kvm_vcpu_stat {
u32 instruction_sigp_stop;
u32 instruction_sigp_stop_store_status;
u32 instruction_sigp_store_status;
+ u32 instruction_sigp_store_adtl_status;
u32 instruction_sigp_arch;
u32 instruction_sigp_prefix;
u32 instruction_sigp_restart;
@@ -270,6 +279,7 @@ struct kvm_vcpu_stat {
#define PGM_SPECIAL_OPERATION 0x13
#define PGM_OPERAND 0x15
#define PGM_TRACE_TABEL 0x16
+#define PGM_VECTOR_PROCESSING 0x1b
#define PGM_SPACE_SWITCH 0x1c
#define PGM_HFP_SQUARE_ROOT 0x1d
#define PGM_PC_TRANSLATION_SPEC 0x1f
@@ -334,6 +344,11 @@ enum irq_types {
IRQ_PEND_COUNT
};
+/* We have 2M for virtio device descriptor pages. Smallest amount of
+ * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
+ */
+#define KVM_S390_MAX_VIRTIO_IRQS 87381
+
/*
* Repressible (non-floating) machine check interrupts
* subclass bits in MCIC
@@ -411,13 +426,32 @@ struct kvm_s390_local_interrupt {
unsigned long pending_irqs;
};
+#define FIRQ_LIST_IO_ISC_0 0
+#define FIRQ_LIST_IO_ISC_1 1
+#define FIRQ_LIST_IO_ISC_2 2
+#define FIRQ_LIST_IO_ISC_3 3
+#define FIRQ_LIST_IO_ISC_4 4
+#define FIRQ_LIST_IO_ISC_5 5
+#define FIRQ_LIST_IO_ISC_6 6
+#define FIRQ_LIST_IO_ISC_7 7
+#define FIRQ_LIST_PFAULT 8
+#define FIRQ_LIST_VIRTIO 9
+#define FIRQ_LIST_COUNT 10
+#define FIRQ_CNTR_IO 0
+#define FIRQ_CNTR_SERVICE 1
+#define FIRQ_CNTR_VIRTIO 2
+#define FIRQ_CNTR_PFAULT 3
+#define FIRQ_MAX_COUNT 4
+
struct kvm_s390_float_interrupt {
+ unsigned long pending_irqs;
spinlock_t lock;
- struct list_head list;
- atomic_t active;
+ struct list_head lists[FIRQ_LIST_COUNT];
+ int counters[FIRQ_MAX_COUNT];
+ struct kvm_s390_mchk_info mchk;
+ struct kvm_s390_ext_info srv_signal;
int next_rr_cpu;
unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
- unsigned int irq_count;
};
struct kvm_hw_wp_info_arch {
@@ -465,6 +499,7 @@ struct kvm_vcpu_arch {
s390_fp_regs host_fpregs;
unsigned int host_acrs[NUM_ACRS];
s390_fp_regs guest_fpregs;
+ struct kvm_s390_vregs *host_vregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
@@ -553,6 +588,7 @@ struct kvm_arch{
int use_cmma;
int user_cpu_state_ctrl;
int user_sigp;
+ int user_stsi;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
int ipte_lock_count;
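
The KVM_S390_MAX_VIRTIO_IRQS comment carries its own derivation; checking the arithmetic:

#include <stdio.h>

int main(void)
{
        /* 2MB of descriptor space, 24 bytes minimum per queue entry */
        printf("%d\n", (2048 * 1024) / 24); /* prints 87381 */
        return 0;
}
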
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
new file mode 100644
index 0000000..7aa7991
--- /dev/null
+++ b/arch/s390/include/asm/livepatch.h
@@ -0,0 +1,43 @@
+/*
+ * livepatch.h - s390-specific Kernel Live Patching Core
+ *
+ * Copyright (c) 2013-2015 SUSE
+ * Authors: Jiri Kosina
+ * Vojtech Pavlik
+ * Jiri Slaby
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef ASM_LIVEPATCH_H
+#define ASM_LIVEPATCH_H
+
+#include <linux/module.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+ return 0;
+}
+
+static inline int klp_write_module_reloc(struct module *mod, unsigned long
+ type, unsigned long loc, unsigned long value)
+{
+ /* not supported yet */
+ return -ENOSYS;
+}
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->psw.addr = ip;
+}
+#else
+#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#endif
+
+#endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 34fbcac..663f23e 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -13,163 +13,6 @@
#include <asm/cpu.h>
#include <asm/types.h>
-#ifdef CONFIG_32BIT
-
-#define LC_ORDER 0
-#define LC_PAGES 1
-
-struct save_area {
- u32 ext_save;
- u64 timer;
- u64 clk_cmp;
- u8 pad1[24];
- u8 psw[8];
- u32 pref_reg;
- u8 pad2[20];
- u32 acc_regs[16];
- u64 fp_regs[4];
- u32 gp_regs[16];
- u32 ctrl_regs[16];
-} __packed;
-
-struct save_area_ext {
- struct save_area sa;
- __vector128 vx_regs[32];
-};
-
-struct _lowcore {
- psw_t restart_psw; /* 0x0000 */
- psw_t restart_old_psw; /* 0x0008 */
- __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */
- __u32 ipl_parmblock_ptr; /* 0x0014 */
- psw_t external_old_psw; /* 0x0018 */
- psw_t svc_old_psw; /* 0x0020 */
- psw_t program_old_psw; /* 0x0028 */
- psw_t mcck_old_psw; /* 0x0030 */
- psw_t io_old_psw; /* 0x0038 */
- __u8 pad_0x0040[0x0058-0x0040]; /* 0x0040 */
- psw_t external_new_psw; /* 0x0058 */
- psw_t svc_new_psw; /* 0x0060 */
- psw_t program_new_psw; /* 0x0068 */
- psw_t mcck_new_psw; /* 0x0070 */
- psw_t io_new_psw; /* 0x0078 */
- __u32 ext_params; /* 0x0080 */
- __u16 ext_cpu_addr; /* 0x0084 */
- __u16 ext_int_code; /* 0x0086 */
- __u16 svc_ilc; /* 0x0088 */
- __u16 svc_code; /* 0x008a */
- __u16 pgm_ilc; /* 0x008c */
- __u16 pgm_code; /* 0x008e */
- __u32 trans_exc_code; /* 0x0090 */
- __u16 mon_class_num; /* 0x0094 */
- __u8 per_code; /* 0x0096 */
- __u8 per_atmid; /* 0x0097 */
- __u32 per_address; /* 0x0098 */
- __u32 monitor_code; /* 0x009c */
- __u8 exc_access_id; /* 0x00a0 */
- __u8 per_access_id; /* 0x00a1 */
- __u8 op_access_id; /* 0x00a2 */
- __u8 ar_mode_id; /* 0x00a3 */
- __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */
- __u16 subchannel_id; /* 0x00b8 */
- __u16 subchannel_nr; /* 0x00ba */
- __u32 io_int_parm; /* 0x00bc */
- __u32 io_int_word; /* 0x00c0 */
- __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
- __u32 stfl_fac_list; /* 0x00c8 */
- __u8 pad_0x00cc[0x00d4-0x00cc]; /* 0x00cc */
- __u32 extended_save_area_addr; /* 0x00d4 */
- __u32 cpu_timer_save_area[2]; /* 0x00d8 */
- __u32 clock_comp_save_area[2]; /* 0x00e0 */
- __u32 mcck_interruption_code[2]; /* 0x00e8 */
- __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
- __u32 external_damage_code; /* 0x00f4 */
- __u32 failing_storage_address; /* 0x00f8 */
- __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */
- psw_t psw_save_area; /* 0x0100 */
- __u32 prefixreg_save_area; /* 0x0108 */
- __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */
-
- /* CPU register save area: defined by architecture */
- __u32 access_regs_save_area[16]; /* 0x0120 */
- __u32 floating_pt_save_area[8]; /* 0x0160 */
- __u32 gpregs_save_area[16]; /* 0x0180 */
- __u32 cregs_save_area[16]; /* 0x01c0 */
-
- /* Save areas. */
- __u32 save_area_sync[8]; /* 0x0200 */
- __u32 save_area_async[8]; /* 0x0220 */
- __u32 save_area_restart[1]; /* 0x0240 */
-
- /* CPU flags. */
- __u32 cpu_flags; /* 0x0244 */
-
- /* Return psws. */
- psw_t return_psw; /* 0x0248 */
- psw_t return_mcck_psw; /* 0x0250 */
-
- /* CPU time accounting values */
- __u64 sync_enter_timer; /* 0x0258 */
- __u64 async_enter_timer; /* 0x0260 */
- __u64 mcck_enter_timer; /* 0x0268 */
- __u64 exit_timer; /* 0x0270 */
- __u64 user_timer; /* 0x0278 */
- __u64 system_timer; /* 0x0280 */
- __u64 steal_timer; /* 0x0288 */
- __u64 last_update_timer; /* 0x0290 */
- __u64 last_update_clock; /* 0x0298 */
- __u64 int_clock; /* 0x02a0 */
- __u64 mcck_clock; /* 0x02a8 */
- __u64 clock_comparator; /* 0x02b0 */
-
- /* Current process. */
- __u32 current_task; /* 0x02b8 */
- __u32 thread_info; /* 0x02bc */
- __u32 kernel_stack; /* 0x02c0 */
-
- /* Interrupt, panic and restart stack. */
- __u32 async_stack; /* 0x02c4 */
- __u32 panic_stack; /* 0x02c8 */
- __u32 restart_stack; /* 0x02cc */
-
- /* Restart function and parameter. */
- __u32 restart_fn; /* 0x02d0 */
- __u32 restart_data; /* 0x02d4 */
- __u32 restart_source; /* 0x02d8 */
-
- /* Address space pointer. */
- __u32 kernel_asce; /* 0x02dc */
- __u32 user_asce; /* 0x02e0 */
- __u32 current_pid; /* 0x02e4 */
-
- /* SMP info area */
- __u32 cpu_nr; /* 0x02e8 */
- __u32 softirq_pending; /* 0x02ec */
- __u32 percpu_offset; /* 0x02f0 */
- __u32 machine_flags; /* 0x02f4 */
- __u8 pad_0x02f8[0x02fc-0x02f8]; /* 0x02f8 */
- __u32 spinlock_lockval; /* 0x02fc */
-
- __u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */
-
- /*
- * 0xe00 contains the address of the IPL Parameter Information
- * block. Dump tools need IPIB for IPL after dump.
- * Note: do not change the position of any fields in 0x0e00-0x0f00
- */
- __u32 ipib; /* 0x0e00 */
- __u32 ipib_checksum; /* 0x0e04 */
- __u32 vmcore_info; /* 0x0e08 */
- __u8 pad_0x0e0c[0x0e18-0x0e0c]; /* 0x0e0c */
- __u32 os_info; /* 0x0e18 */
- __u8 pad_0x0e1c[0x0f00-0x0e1c]; /* 0x0e1c */
-
- /* Extended facility list */
- __u64 stfle_fac_list[32]; /* 0x0f00 */
-} __packed;
-
-#else /* CONFIG_32BIT */
-
#define LC_ORDER 1
#define LC_PAGES 2
@@ -354,8 +197,6 @@ struct _lowcore {
__u8 vector_save_area[1024]; /* 0x1c00 */
} __packed;
-#endif /* CONFIG_32BIT */
-
#define S390_lowcore (*((struct _lowcore *) 0))
extern struct _lowcore *lowcore_ptr[];
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 9977e08..b55a59e 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -8,7 +8,7 @@
#include <uapi/asm/mman.h>
-#if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
+#ifndef __ASSEMBLY__
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags)
#endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8fb3802..d25d9ff 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -19,9 +19,7 @@ static inline int init_new_context(struct task_struct *tsk,
atomic_set(&mm->context.attach_count, 0);
mm->context.flush_mm = 0;
mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
-#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
-#endif
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
mm->context.asce_limit = STACK_TOP_MAX;
@@ -110,10 +108,8 @@ static inline void activate_mm(struct mm_struct *prev,
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
-#ifdef CONFIG_64BIT
if (oldmm->context.asce_limit < mm->context.asce_limit)
crst_table_downgrade(mm, oldmm->context.asce_limit);
-#endif
}
static inline void arch_exit_mmap(struct mm_struct *mm)
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 933355e..6d6556c 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,8 +10,6 @@
*/
#define __my_cpu_offset S390_lowcore.percpu_offset
-#ifdef CONFIG_64BIT
-
/*
* For 64 bit module code, the module may be more than 4G above the
* per cpu area, use weak definitions to force the compiler to
@@ -183,8 +181,6 @@
#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif /* CONFIG_64BIT */
-
#include <asm-generic/percpu.h>
#endif /* __ARCH_S390_PERCPU__ */
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 159a8ec..4cb19fe 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -9,8 +9,6 @@
#ifndef _ASM_S390_PERF_EVENT_H
#define _ASM_S390_PERF_EVENT_H
-#ifdef CONFIG_64BIT
-
#include <linux/perf_event.h>
#include <linux/device.h>
#include <asm/cpu_mf.h>
@@ -92,5 +90,4 @@ struct sf_raw_sample {
int perf_reserve_sampling(void);
void perf_release_sampling(void);
-#endif /* CONFIG_64BIT */
#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 3009c2b..51e7fb6 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -33,11 +33,7 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
*s = val;
n = (n / 256) - 1;
asm volatile(
-#ifdef CONFIG_64BIT
" mvc 8(248,%0),0(%0)\n"
-#else
- " mvc 4(252,%0),0(%0)\n"
-#endif
"0: mvc 256(256,%0),0(%0)\n"
" la %0,256(%0)\n"
" brct %1,0b\n"
@@ -50,24 +46,6 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
clear_table(crst, entry, sizeof(unsigned long)*2048);
}
-#ifndef CONFIG_64BIT
-
-static inline unsigned long pgd_entry_type(struct mm_struct *mm)
-{
- return _SEGMENT_ENTRY_EMPTY;
-}
-
-#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
-#define pud_free(mm, x) do { } while (0)
-
-#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(mm, x) do { } while (0)
-
-#define pgd_populate(mm, pgd, pud) BUG()
-#define pud_populate(mm, pud, pmd) BUG()
-
-#else /* CONFIG_64BIT */
-
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
if (mm->context.asce_limit <= (1UL << 31))
@@ -119,8 +97,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
-#endif /* CONFIG_64BIT */
-
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
spin_lock_init(&mm->context.list_lock);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e08ec38..989cfae 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -66,15 +66,9 @@ extern unsigned long zero_page_mask;
* table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
*/
-#ifndef CONFIG_64BIT
-# define PMD_SHIFT 20
-# define PUD_SHIFT 20
-# define PGDIR_SHIFT 20
-#else /* CONFIG_64BIT */
-# define PMD_SHIFT 20
-# define PUD_SHIFT 31
-# define PGDIR_SHIFT 42
-#endif /* CONFIG_64BIT */
+#define PMD_SHIFT 20
+#define PUD_SHIFT 31
+#define PGDIR_SHIFT 42
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
@@ -90,15 +84,8 @@ extern unsigned long zero_page_mask;
* that leads to 1024 pte per pgd
*/
#define PTRS_PER_PTE 256
-#ifndef CONFIG_64BIT
-#define __PAGETABLE_PUD_FOLDED
-#define PTRS_PER_PMD 1
-#define __PAGETABLE_PMD_FOLDED
-#define PTRS_PER_PUD 1
-#else /* CONFIG_64BIT */
#define PTRS_PER_PMD 2048
#define PTRS_PER_PUD 2048
-#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD 2048
#define FIRST_USER_ADDRESS 0UL
@@ -127,23 +114,19 @@ extern struct page *vmemmap;
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
-#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN (1UL << 31)
-#endif
static inline int is_module_addr(void *addr)
{
-#ifdef CONFIG_64BIT
BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
if (addr < (void *)MODULES_VADDR)
return 0;
if (addr > (void *)MODULES_END)
return 0;
-#endif
return 1;
}
@@ -284,56 +267,6 @@ static inline int is_module_addr(void *addr)
* pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
*/
-#ifndef CONFIG_64BIT
-
-/* Bits in the segment table address-space-control-element */
-#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
-#define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */
-#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
-#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
-#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
-
-/* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */
-#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
-#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
-#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
-#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
-#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
-
-#define _SEGMENT_ENTRY_DIRTY 0 /* No sw dirty bit for 31-bit */
-#define _SEGMENT_ENTRY_YOUNG 0 /* No sw young bit for 31-bit */
-#define _SEGMENT_ENTRY_READ 0 /* No sw read bit for 31-bit */
-#define _SEGMENT_ENTRY_WRITE 0 /* No sw write bit for 31-bit */
-#define _SEGMENT_ENTRY_LARGE 0 /* No large pages for 31-bit */
-#define _SEGMENT_ENTRY_BITS_LARGE 0
-#define _SEGMENT_ENTRY_ORIGIN_LARGE 0
-
-#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
-#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
-
-/*
- * Segment table entry encoding (I = invalid, R = read-only bit):
- * ..R...I.....
- * prot-none ..1...1.....
- * read-only ..1...0.....
- * read-write ..0...0.....
- * empty ..0...1.....
- */
-
-/* Page status table bits for virtualization */
-#define PGSTE_ACC_BITS 0xf0000000UL
-#define PGSTE_FP_BIT 0x08000000UL
-#define PGSTE_PCL_BIT 0x00800000UL
-#define PGSTE_HR_BIT 0x00400000UL
-#define PGSTE_HC_BIT 0x00200000UL
-#define PGSTE_GR_BIT 0x00040000UL
-#define PGSTE_GC_BIT 0x00020000UL
-#define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */
-#define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */
-
-#else /* CONFIG_64BIT */
-
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
@@ -417,8 +350,6 @@ static inline int is_module_addr(void *addr)
#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
-#endif /* CONFIG_64BIT */
-
/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
@@ -509,19 +440,6 @@ static inline int mm_use_skey(struct mm_struct *mm)
/*
* pgd/pmd/pte query functions
*/
-#ifndef CONFIG_64BIT
-
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-
-static inline int pud_present(pud_t pud) { return 1; }
-static inline int pud_none(pud_t pud) { return 0; }
-static inline int pud_large(pud_t pud) { return 0; }
-static inline int pud_bad(pud_t pud) { return 0; }
-
-#else /* CONFIG_64BIT */
-
static inline int pgd_present(pgd_t pgd)
{
if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
@@ -583,8 +501,6 @@ static inline int pud_bad(pud_t pud)
return (pud_val(pud) & mask) != 0;
}
-#endif /* CONFIG_64BIT */
-
static inline int pmd_present(pmd_t pmd)
{
return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
@@ -916,18 +832,14 @@ static inline int pte_unused(pte_t pte)
static inline void pgd_clear(pgd_t *pgd)
{
-#ifdef CONFIG_64BIT
if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
-#endif
}
static inline void pud_clear(pud_t *pud)
{
-#ifdef CONFIG_64BIT
if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pud_val(*pud) = _REGION3_ENTRY_EMPTY;
-#endif
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -1026,10 +938,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidation + global TLB flush for the pte */
asm volatile(
" ipte %2,%3"
@@ -1040,10 +948,6 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidation + local TLB flush for the pte */
asm volatile(
" .insn rrf,0xb2210000,%2,%3,0,1"
@@ -1054,10 +958,6 @@ static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidate a range of ptes + global TLB flush of the ptes */
do {
asm volatile(
@@ -1376,17 +1276,6 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#ifndef CONFIG_64BIT
-
-#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
-#define pud_deref(pmd) ({ BUG(); 0UL; })
-#define pgd_deref(pmd) ({ BUG(); 0UL; })
-
-#define pud_offset(pgd, address) ((pud_t *) pgd)
-#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
-
-#else /* CONFIG_64BIT */
-
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
@@ -1407,8 +1296,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
return pmd + pmd_index(address);
}
-#endif /* CONFIG_64BIT */
-
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -1729,11 +1616,9 @@ static inline int has_transparent_hugepage(void)
* 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
* 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
*/
-#ifndef CONFIG_64BIT
-#define __SWP_OFFSET_MASK (~0UL >> 12)
-#else
+
#define __SWP_OFFSET_MASK (~0UL >> 11)
-#endif
+
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
pte_t pte;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index e7cbbdc..dedb621 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -19,7 +19,6 @@
#define _CIF_ASCE (1<<CIF_ASCE)
#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
-
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
@@ -66,13 +65,6 @@ extern void execve_tail(void);
/*
* User space process size: 2GB for 31 bit, 4TB or 8PB for 64 bit.
*/
-#ifndef CONFIG_64BIT
-
-#define TASK_SIZE (1UL << 31)
-#define TASK_MAX_SIZE (1UL << 31)
-#define TASK_UNMAPPED_BASE (1UL << 30)
-
-#else /* CONFIG_64BIT */
#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
@@ -80,15 +72,8 @@ extern void execve_tail(void);
#define TASK_SIZE TASK_SIZE_OF(current)
#define TASK_MAX_SIZE (1UL << 53)
-#endif /* CONFIG_64BIT */
-
-#ifndef CONFIG_64BIT
-#define STACK_TOP (1UL << 31)
-#define STACK_TOP_MAX (1UL << 31)
-#else /* CONFIG_64BIT */
#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
#define STACK_TOP_MAX (1UL << 42)
-#endif /* CONFIG_64BIT */
#define HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -115,10 +100,8 @@ struct thread_struct {
/* cpu runtime instrumentation */
struct runtime_instr_cb *ri_cb;
int ri_signum;
-#ifdef CONFIG_64BIT
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
__vector128 *vxrs; /* Vector register save area */
-#endif
};
/* Flag to disable transactions. */
@@ -181,11 +164,7 @@ struct task_struct;
struct mm_struct;
struct seq_file;
-#ifdef CONFIG_64BIT
-extern void show_cacheinfo(struct seq_file *m);
-#else
-static inline void show_cacheinfo(struct seq_file *m) { }
-#endif
+void show_cacheinfo(struct seq_file *m);
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
@@ -229,11 +208,7 @@ static inline void psw_set_key(unsigned int key)
*/
static inline void __load_psw(psw_t psw)
{
-#ifndef CONFIG_64BIT
- asm volatile("lpsw %0" : : "Q" (psw) : "cc");
-#else
asm volatile("lpswe %0" : : "Q" (psw) : "cc");
-#endif
}
/*
@@ -247,22 +222,12 @@ static inline void __load_psw_mask (unsigned long mask)
psw.mask = mask;
-#ifndef CONFIG_64BIT
- asm volatile(
- " basr %0,0\n"
- "0: ahi %0,1f-0b\n"
- " st %0,%O1+4(%R1)\n"
- " lpsw %1\n"
- "1:"
- : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#else /* CONFIG_64BIT */
asm volatile(
" larl %0,1f\n"
" stg %0,%O1+8(%R1)\n"
" lpswe %1\n"
"1:"
: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#endif /* CONFIG_64BIT */
}
/*
@@ -270,20 +235,12 @@ static inline void __load_psw_mask (unsigned long mask)
*/
static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
{
-#ifndef CONFIG_64BIT
- if (psw.addr & PSW_ADDR_AMODE)
- /* 31 bit mode */
- return (psw.addr - ilc) | PSW_ADDR_AMODE;
- /* 24 bit mode */
- return (psw.addr - ilc) & ((1UL << 24) - 1);
-#else
unsigned long mask;
mask = (psw.mask & PSW_MASK_EA) ? -1UL :
(psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
(1UL << 24) - 1;
return (psw.addr - ilc) & mask;
-#endif
}
/*
@@ -305,26 +262,6 @@ static inline void __noreturn disabled_wait(unsigned long code)
* Store status and then load disabled wait psw,
* the processor is dead afterwards
*/
-#ifndef CONFIG_64BIT
- asm volatile(
- " stctl 0,0,0(%2)\n"
- " ni 0(%2),0xef\n" /* switch off protection */
- " lctl 0,0,0(%2)\n"
- " stpt 0xd8\n" /* store timer */
- " stckc 0xe0\n" /* store clock comparator */
- " stpx 0x108\n" /* store prefix register */
- " stam 0,15,0x120\n" /* store access registers */
- " std 0,0x160\n" /* store f0 */
- " std 2,0x168\n" /* store f2 */
- " std 4,0x170\n" /* store f4 */
- " std 6,0x178\n" /* store f6 */
- " stm 0,15,0x180\n" /* store general registers */
- " stctl 0,15,0x1c0\n" /* store control registers */
- " oi 0x1c0,0x10\n" /* fake protection bit */
- " lpsw 0(%1)"
- : "=m" (ctl_buf)
- : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
-#else /* CONFIG_64BIT */
asm volatile(
" stctg 0,0,0(%2)\n"
" ni 4(%2),0xef\n" /* switch off protection */
@@ -357,7 +294,6 @@ static inline void __noreturn disabled_wait(unsigned long code)
" lpswe 0(%1)"
: "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
-#endif /* CONFIG_64BIT */
while (1);
}
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index be317fe..6feda25 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -40,12 +40,8 @@ struct psw_bits {
unsigned long long ri : 1; /* Runtime Instrumentation */
unsigned long long : 6;
unsigned long long eaba : 2; /* Addressing Mode */
-#ifdef CONFIG_64BIT
unsigned long long : 31;
unsigned long long ia : 64;/* Instruction Address */
-#else
- unsigned long long ia : 31;/* Instruction Address */
-#endif
};
enum {
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 06f3034..998b61c 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -211,11 +211,6 @@ struct qdio_buffer_element {
u8 scount;
u8 sflags;
u32 length;
-#ifdef CONFIG_32BIT
- /* private: */
- void *res2;
- /* public: */
-#endif
void *addr;
} __attribute__ ((packed, aligned(16)));
@@ -232,11 +227,6 @@ struct qdio_buffer {
* @sbal: absolute SBAL address
*/
struct sl_element {
-#ifdef CONFIG_32BIT
- /* private: */
- unsigned long reserved;
- /* public: */
-#endif
unsigned long sbal;
} __attribute__ ((packed));
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index 830da73..402ad6d 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -72,27 +72,19 @@ static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
{
-#ifdef CONFIG_64BIT
if (cb_prev)
store_runtime_instr_cb(cb_prev);
-#endif
}
static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
struct runtime_instr_cb *cb_prev)
{
-#ifdef CONFIG_64BIT
if (cb_next)
load_runtime_instr_cb(cb_next);
else if (cb_prev)
load_runtime_instr_cb(&runtime_instr_empty_cb);
-#endif
}
-#ifdef CONFIG_64BIT
-extern void exit_thread_runtime_instr(void);
-#else
-static inline void exit_thread_runtime_instr(void) { }
-#endif
+void exit_thread_runtime_instr(void);
#endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 487f9b6..4b43ee7 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -39,17 +39,10 @@
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif
-#ifndef CONFIG_64BIT
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-#define RWSEM_ACTIVE_BIAS 0x00000001
-#define RWSEM_ACTIVE_MASK 0x0000ffff
-#define RWSEM_WAITING_BIAS (-0x00010000)
-#else /* CONFIG_64BIT */
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
-#endif /* CONFIG_64BIT */
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
@@ -61,19 +54,11 @@ static inline void __down_read(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ahi %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -89,15 +74,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: ltr %1,%0\n"
- " jm 1f\n"
- " ahi %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b\n"
- "1:"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: ltgr %1,%0\n"
" jm 1f\n"
@@ -105,7 +81,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
" csg %0,%1,%2\n"
" jl 0b\n"
"1:"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -121,19 +96,11 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
tmp = RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " a %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -154,19 +121,11 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
signed long old;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%1\n"
- "0: ltr %0,%0\n"
- " jnz 1f\n"
- " cs %0,%3,%1\n"
- " jl 0b\n"
-#else /* CONFIG_64BIT */
" lg %0,%1\n"
"0: ltgr %0,%0\n"
" jnz 1f\n"
" csg %0,%3,%1\n"
" jl 0b\n"
-#endif /* CONFIG_64BIT */
"1:"
: "=&d" (old), "=Q" (sem->count)
: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
@@ -182,19 +141,11 @@ static inline void __up_read(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ahi %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -212,19 +163,11 @@ static inline void __up_write(struct rw_semaphore *sem)
tmp = -RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " a %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -242,19 +185,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
tmp = -RWSEM_WAITING_BIAS;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " a %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -270,19 +205,11 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ar %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
@@ -296,19 +223,11 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ar %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b8d1e54..b8ffc1b 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -15,19 +15,11 @@
#include <asm/lowcore.h>
#include <asm/types.h>
-#ifndef CONFIG_64BIT
-#define IPL_DEVICE (*(unsigned long *) (0x10404))
-#define INITRD_START (*(unsigned long *) (0x1040C))
-#define INITRD_SIZE (*(unsigned long *) (0x10414))
-#define OLDMEM_BASE (*(unsigned long *) (0x1041C))
-#define OLDMEM_SIZE (*(unsigned long *) (0x10424))
-#else /* CONFIG_64BIT */
#define IPL_DEVICE (*(unsigned long *) (0x10400))
#define INITRD_START (*(unsigned long *) (0x10408))
#define INITRD_SIZE (*(unsigned long *) (0x10410))
#define OLDMEM_BASE (*(unsigned long *) (0x10418))
#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
-#endif /* CONFIG_64BIT */
#define COMMAND_LINE ((char *) (0x10480))
extern int memory_end_set;
@@ -68,26 +60,8 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1
#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1
-#ifndef CONFIG_64BIT
-#define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
-#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
-#define MACHINE_HAS_IDTE (0)
-#define MACHINE_HAS_DIAG44 (1)
-#define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG)
-#define MACHINE_HAS_EDAT1 (0)
-#define MACHINE_HAS_EDAT2 (0)
-#define MACHINE_HAS_LPP (0)
-#define MACHINE_HAS_TOPOLOGY (0)
-#define MACHINE_HAS_TE (0)
-#define MACHINE_HAS_TLB_LC (0)
-#define MACHINE_HAS_VX (0)
-#define MACHINE_HAS_CAD (0)
-#else /* CONFIG_64BIT */
-#define MACHINE_HAS_IEEE (1)
-#define MACHINE_HAS_CSP (1)
#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
-#define MACHINE_HAS_MVPG (1)
#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
@@ -96,7 +70,6 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
-#endif /* CONFIG_64BIT */
/*
* Console mode. Override with conmode=
@@ -135,19 +108,11 @@ extern void (*_machine_power_off)(void);
#else /* __ASSEMBLY__ */
-#ifndef CONFIG_64BIT
-#define IPL_DEVICE 0x10404
-#define INITRD_START 0x1040C
-#define INITRD_SIZE 0x10414
-#define OLDMEM_BASE 0x1041C
-#define OLDMEM_SIZE 0x10424
-#else /* CONFIG_64BIT */
#define IPL_DEVICE 0x10400
#define INITRD_START 0x10408
#define INITRD_SIZE 0x10410
#define OLDMEM_BASE 0x10418
#define OLDMEM_SIZE 0x10420
-#endif /* CONFIG_64BIT */
#define COMMAND_LINE 0x10480
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index 5959bfb..c8b7cf9 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -51,7 +51,6 @@
wl = __wl; \
})
-#ifdef CONFIG_64BIT
#define udiv_qrnnd(q, r, n1, n0, d) \
do { unsigned long __n; \
unsigned int __r, __d; \
@@ -60,15 +59,6 @@
(q) = __n / __d; \
(r) = __n % __d; \
} while (0)
-#else
-#define udiv_qrnnd(q, r, n1, n0, d) \
- do { unsigned int __r; \
- (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
- (r) = __r; \
- } while (0)
-extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
- unsigned int , unsigned int);
-#endif
#define UDIV_NEEDS_NORMALIZATION 0
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index a60d085..487428b 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -1,16 +1,7 @@
#ifndef _ASM_S390_SPARSEMEM_H
#define _ASM_S390_SPARSEMEM_H
-#ifdef CONFIG_64BIT
-
#define SECTION_SIZE_BITS 28
#define MAX_PHYSMEM_BITS 46
-#else
-
-#define SECTION_SIZE_BITS 25
-#define MAX_PHYSMEM_BITS 31
-
-#endif /* CONFIG_64BIT */
-
#endif /* _ASM_S390_SPARSEMEM_H */
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 2542a7e..d62e7a6 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -18,9 +18,6 @@ static inline int test_fp_ctl(u32 fpc)
u32 orig_fpc;
int rc;
- if (!MACHINE_HAS_IEEE)
- return 0;
-
asm volatile(
" efpc %1\n"
" sfpc %2\n"
@@ -35,9 +32,6 @@ static inline int test_fp_ctl(u32 fpc)
static inline void save_fp_ctl(u32 *fpc)
{
- if (!MACHINE_HAS_IEEE)
- return;
-
asm volatile(
" stfpc %0\n"
: "+Q" (*fpc));
@@ -47,9 +41,6 @@ static inline int restore_fp_ctl(u32 *fpc)
{
int rc;
- if (!MACHINE_HAS_IEEE)
- return 0;
-
asm volatile(
" lfpc %1\n"
"0: la %0,0\n"
@@ -65,8 +56,6 @@ static inline void save_fp_regs(freg_t *fprs)
asm volatile("std 2,%0" : "=Q" (fprs[2]));
asm volatile("std 4,%0" : "=Q" (fprs[4]));
asm volatile("std 6,%0" : "=Q" (fprs[6]));
- if (!MACHINE_HAS_IEEE)
- return;
asm volatile("std 1,%0" : "=Q" (fprs[1]));
asm volatile("std 3,%0" : "=Q" (fprs[3]));
asm volatile("std 5,%0" : "=Q" (fprs[5]));
@@ -87,8 +76,6 @@ static inline void restore_fp_regs(freg_t *fprs)
asm volatile("ld 2,%0" : : "Q" (fprs[2]));
asm volatile("ld 4,%0" : : "Q" (fprs[4]));
asm volatile("ld 6,%0" : : "Q" (fprs[6]));
- if (!MACHINE_HAS_IEEE)
- return;
asm volatile("ld 1,%0" : : "Q" (fprs[1]));
asm volatile("ld 3,%0" : : "Q" (fprs[3]));
asm volatile("ld 5,%0" : : "Q" (fprs[5]));
@@ -140,22 +127,18 @@ static inline void restore_vx_regs(__vector128 *vxrs)
static inline void save_fp_vx_regs(struct task_struct *task)
{
-#ifdef CONFIG_64BIT
if (task->thread.vxrs)
save_vx_regs(task->thread.vxrs);
else
-#endif
- save_fp_regs(task->thread.fp_regs.fprs);
+ save_fp_regs(task->thread.fp_regs.fprs);
}
static inline void restore_fp_vx_regs(struct task_struct *task)
{
-#ifdef CONFIG_64BIT
if (task->thread.vxrs)
restore_vx_regs(task->thread.vxrs);
else
-#endif
- restore_fp_regs(task->thread.fp_regs.fprs);
+ restore_fp_regs(task->thread.fp_regs.fprs);
}
static inline void save_access_regs(unsigned int *acrs)
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 5bc1259..6ba0bf9 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -95,6 +95,6 @@ static inline int syscall_get_arch(void)
if (test_tsk_thread_flag(current, TIF_31BIT))
return AUDIT_ARCH_S390;
#endif
- return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
+ return AUDIT_ARCH_S390X;
}
#endif /* _ASM_SYSCALL_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ef1df71..d532098 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -10,13 +10,8 @@
/*
* Size of kernel stack for each process
*/
-#ifndef CONFIG_64BIT
-#define THREAD_ORDER 1
-#define ASYNC_ORDER 1
-#else /* CONFIG_64BIT */
#define THREAD_ORDER 2
#define ASYNC_ORDER 2
-#endif /* CONFIG_64BIT */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
@@ -66,6 +61,8 @@ static inline struct thread_info *current_thread_info(void)
return (struct thread_info *) S390_lowcore.thread_info;
}
+void arch_release_task_struct(struct task_struct *tsk);
+
#define THREAD_SIZE_ORDER THREAD_ORDER
#endif
@@ -99,10 +96,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
-#ifdef CONFIG_64BIT
#define is_32bit_task() (test_thread_flag(TIF_31BIT))
-#else
-#define is_32bit_task() (1)
-#endif
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 06d8741..7a92e69 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -118,12 +118,10 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
-#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 31))
return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
tlb_remove_table(tlb, pmd);
-#endif
}
/*
@@ -136,11 +134,9 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
-#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 42))
return;
tlb_remove_table(tlb, pud);
-#endif
}
#define tlb_start_vma(tlb, vma) do { } while (0)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 16c9c88..ca148f7 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -49,13 +49,6 @@ static inline void __tlb_flush_global(void)
register unsigned long reg4 asm("4");
long dummy;
-#ifndef CONFIG_64BIT
- if (!MACHINE_HAS_CSP) {
- smp_ptlb_all();
- return;
- }
-#endif /* CONFIG_64BIT */
-
dummy = 0;
reg2 = reg3 = 0;
reg4 = ((unsigned long) &dummy) + 1;
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index dccef3c..6740f4f 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -8,21 +8,4 @@
#include <uapi/asm/types.h>
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-
-#ifndef __ASSEMBLY__
-
-#ifndef CONFIG_64BIT
-typedef union {
- unsigned long long pair;
- struct {
- unsigned long even;
- unsigned long odd;
- } subreg;
-} register_pair;
-
-#endif /* ! CONFIG_64BIT */
-#endif /* __ASSEMBLY__ */
#endif /* _S390_TYPES_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index cd4c68e..d64a7a6 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -372,5 +372,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
}
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
+void s390_kernel_write(void *dst, const void *src, size_t size);
#endif /* __S390_UACCESS_H */
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 6518863..91f56b1 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -9,11 +9,7 @@
#include <uapi/asm/unistd.h>
-#ifndef CONFIG_64BIT
-#define __IGNORE_select
-#else
#define __IGNORE_time
-#endif
/* Ignore NUMA system calls. Not wired up on s390. */
#define __IGNORE_mbind
@@ -43,10 +39,6 @@
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
-# ifndef CONFIG_64BIT
-# define __ARCH_WANT_STAT64
-# define __ARCH_WANT_SYS_TIME
-# endif
# ifdef CONFIG_COMPAT
# define __ARCH_WANT_COMPAT_SYS_TIME
# endif
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a62526d..787acd4 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -42,10 +42,8 @@ struct vdso_per_cpu_data {
extern struct vdso_data *vdso_data;
-#ifdef CONFIG_64BIT
int vdso_alloc_per_cpu(struct _lowcore *lowcore);
void vdso_free_per_cpu(struct _lowcore *lowcore);
-#endif
#endif /* __ASSEMBLY__ */
#endif /* __S390_VDSO_H__ */
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 9c77e60..ef1a5fc 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -150,6 +150,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_CRS (1UL << 3)
#define KVM_SYNC_ARCH0 (1UL << 4)
#define KVM_SYNC_PFAULT (1UL << 5)
+#define KVM_SYNC_VRS (1UL << 6)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -164,6 +165,9 @@ struct kvm_sync_regs {
__u64 pft; /* pfault token [PFAULT] */
__u64 pfs; /* pfault select [PFAULT] */
__u64 pfc; /* pfault compare [PFAULT] */
+ __u64 vrs[32][2]; /* vector registers */
+ __u8 reserved[512]; /* for future vector expansion */
+ __u32 fpc; /* only valid with vector registers */
};
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index d4096fd..ee69c08 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -230,7 +230,7 @@
* and returns a key, which can be used to find a mnemonic name
* of the instruction in the icpt_insn_codes table.
*/
-#define icpt_insn_decoder(insn) \
+#define icpt_insn_decoder(insn) ( \
INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \
INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \
@@ -239,6 +239,6 @@
INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \
INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \
- INSN_DECODE(insn)
+ INSN_DECODE(insn))
#endif /* _UAPI_ASM_S390_SIE_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 31fab26..ffb8761 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -26,25 +26,21 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
+CFLAGS_sysinfo.o += -w
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
-obj-y += dumpstack.o
+obj-y += runtime_instr.o cache.o dumpstack.o
+obj-y += entry.o reipl.o relocate_kernel.o
-obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
-obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
-obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
-
-extra-y += head.o vmlinux.lds
-extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
+extra-y += head.o head64.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_BOOK) += topology.o
-obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
+obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o
@@ -56,13 +52,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
-ifdef CONFIG_64BIT
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
- perf_cpum_cf_events.o
-obj-y += runtime_instr.o cache.o
-endif
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
+obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
# vdso
-obj-$(CONFIG_64BIT) += vdso64/
-obj-$(CONFIG_32BIT) += vdso32/
+obj-y += vdso64/
obj-$(CONFIG_COMPAT) += vdso32/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e07e916..f35058d 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -166,11 +166,9 @@ int main(void)
DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
-#ifdef CONFIG_32BIT
- DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
-#else /* CONFIG_32BIT */
DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
+ DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
@@ -183,6 +181,5 @@ int main(void)
DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
-#endif /* CONFIG_32BIT */
return 0;
}
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f74a53d..daed3fd 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -11,8 +11,6 @@
#include <asm/ptrace.h>
#include <asm/sigp.h>
-#ifdef CONFIG_64BIT
-
ENTRY(s390_base_mcck_handler)
basr %r13,0
0: lg %r15,__LC_PANIC_STACK # load panic stack
@@ -131,77 +129,3 @@ ENTRY(diag308_reset)
.Lfpctl:
.long 0
.previous
-
-#else /* CONFIG_64BIT */
-
-ENTRY(s390_base_mcck_handler)
- basr %r13,0
-0: l %r15,__LC_PANIC_STACK # load panic stack
- ahi %r15,-STACK_FRAME_OVERHEAD
- l %r1,2f-0b(%r13)
- l %r1,0(%r1)
- ltr %r1,%r1
- jz 1f
- basr %r14,%r1
-1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
- lpsw __LC_MCK_OLD_PSW
-
-2: .long s390_base_mcck_handler_fn
-
- .section .bss
- .align 4
- .globl s390_base_mcck_handler_fn
-s390_base_mcck_handler_fn:
- .long 0
- .previous
-
-ENTRY(s390_base_ext_handler)
- stm %r0,%r15,__LC_SAVE_AREA_ASYNC
- basr %r13,0
-0: ahi %r15,-STACK_FRAME_OVERHEAD
- l %r1,2f-0b(%r13)
- l %r1,0(%r1)
- ltr %r1,%r1
- jz 1f
- basr %r14,%r1
-1: lm %r0,%r15,__LC_SAVE_AREA_ASYNC
- ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
- lpsw __LC_EXT_OLD_PSW
-
-2: .long s390_base_ext_handler_fn
-
- .section .bss
- .align 4
- .globl s390_base_ext_handler_fn
-s390_base_ext_handler_fn:
- .long 0
- .previous
-
-ENTRY(s390_base_pgm_handler)
- stm %r0,%r15,__LC_SAVE_AREA_SYNC
- basr %r13,0
-0: ahi %r15,-STACK_FRAME_OVERHEAD
- l %r1,2f-0b(%r13)
- l %r1,0(%r1)
- ltr %r1,%r1
- jz 1f
- basr %r14,%r1
- lm %r0,%r15,__LC_SAVE_AREA_SYNC
- lpsw __LC_PGM_OLD_PSW
-
-1: lpsw disabled_wait_psw-0b(%r13)
-
-2: .long s390_base_pgm_handler_fn
-
-disabled_wait_psw:
- .align 8
- .long 0x000a0000,0x00000000 + s390_base_pgm_handler
-
- .section .bss
- .align 4
- .globl s390_base_pgm_handler_fn
-s390_base_pgm_handler_fn:
- .long 0
- .previous
-
-#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 0969d113..bff5e3b 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -70,6 +70,8 @@ void show_cacheinfo(struct seq_file *m)
struct cacheinfo *cache;
int idx;
+ if (!test_facility(34))
+ return;
get_online_cpus();
this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
@@ -159,6 +161,8 @@ int populate_cache_leaves(unsigned int cpu)
union cache_topology ct;
enum cache_type ctype;
+ if (!test_facility(34))
+ return -EOPNOTSUPP;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d7b0c4d..199ec92 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -27,13 +27,9 @@ static int diag8_noresponse(int cmdlen)
register unsigned long reg3 asm ("3") = cmdlen;
asm volatile(
-#ifndef CONFIG_64BIT
- " diag %1,%0,0x8\n"
-#else /* CONFIG_64BIT */
" sam31\n"
" diag %1,%0,0x8\n"
" sam64\n"
-#endif /* CONFIG_64BIT */
: "+d" (reg3) : "d" (reg2) : "cc");
return reg3;
}
@@ -46,17 +42,11 @@ static int diag8_response(int cmdlen, char *response, int *rlen)
register unsigned long reg5 asm ("5") = *rlen;
asm volatile(
-#ifndef CONFIG_64BIT
- " diag %2,%0,0x8\n"
- " brc 8,1f\n"
- " ar %1,%4\n"
-#else /* CONFIG_64BIT */
" sam31\n"
" diag %2,%0,0x8\n"
" sam64\n"
" brc 8,1f\n"
" agr %1,%4\n"
-#endif /* CONFIG_64BIT */
"1:\n"
: "+d" (reg4), "+d" (reg5)
: "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 8237fc0..2f69243 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -18,13 +18,9 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
int rc = 0;
asm volatile(
-#ifdef CONFIG_64BIT
" sam31\n"
" diag %2,2,0x14\n"
" sam64\n"
-#else
- " diag %2,2,0x14\n"
-#endif
" ipm %0\n"
" srl %0,28\n"
: "=d" (rc), "+d" (_ry2)
@@ -52,7 +48,6 @@ int diag210(struct diag210 *addr)
spin_lock_irqsave(&diag210_lock, flags);
diag210_tmp = *addr;
-#ifdef CONFIG_64BIT
asm volatile(
" lhi %0,-1\n"
" sam31\n"
@@ -62,16 +57,6 @@ int diag210(struct diag210 *addr)
"1: sam64\n"
EX_TABLE(0b, 1b)
: "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-#else
- asm volatile(
- " lhi %0,-1\n"
- " diag %1,0,0x210\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-#endif
*addr = diag210_tmp;
spin_unlock_irqrestore(&diag210_lock, flags);
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 5334303..8140d10 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -32,12 +32,6 @@
#include <asm/debug.h>
#include <asm/irq.h>
-#ifndef CONFIG_64BIT
-#define ONELONG "%08lx: "
-#else /* CONFIG_64BIT */
-#define ONELONG "%016lx: "
-#endif /* CONFIG_64BIT */
-
enum {
UNUSED, /* Indicates the end of the operand list */
R_8, /* GPR starting at position 8 */
@@ -536,12 +530,10 @@ static char *long_insn_name[] = {
};
static struct s390_insn opcode[] = {
-#ifdef CONFIG_64BIT
{ "bprp", 0xc5, INSTR_MII_UPI },
{ "bpp", 0xc7, INSTR_SMI_U0RDP },
{ "trtr", 0xd0, INSTR_SS_L0RDRD },
{ "lmd", 0xef, INSTR_SS_RRRDRD3 },
-#endif
{ "spm", 0x04, INSTR_RR_R0 },
{ "balr", 0x05, INSTR_RR_RR },
{ "bctr", 0x06, INSTR_RR_RR },
@@ -725,11 +717,9 @@ static struct s390_insn opcode[] = {
};
static struct s390_insn opcode_01[] = {
-#ifdef CONFIG_64BIT
{ "ptff", 0x04, INSTR_E },
{ "pfpo", 0x0a, INSTR_E },
{ "sam64", 0x0e, INSTR_E },
-#endif
{ "pr", 0x01, INSTR_E },
{ "upt", 0x02, INSTR_E },
{ "sckpf", 0x07, INSTR_E },
@@ -741,7 +731,6 @@ static struct s390_insn opcode_01[] = {
};
static struct s390_insn opcode_a5[] = {
-#ifdef CONFIG_64BIT
{ "iihh", 0x00, INSTR_RI_RU },
{ "iihl", 0x01, INSTR_RI_RU },
{ "iilh", 0x02, INSTR_RI_RU },
@@ -758,12 +747,10 @@ static struct s390_insn opcode_a5[] = {
{ "llihl", 0x0d, INSTR_RI_RU },
{ "llilh", 0x0e, INSTR_RI_RU },
{ "llill", 0x0f, INSTR_RI_RU },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_a7[] = {
-#ifdef CONFIG_64BIT
{ "tmhh", 0x02, INSTR_RI_RU },
{ "tmhl", 0x03, INSTR_RI_RU },
{ "brctg", 0x07, INSTR_RI_RP },
@@ -771,7 +758,6 @@ static struct s390_insn opcode_a7[] = {
{ "aghi", 0x0b, INSTR_RI_RI },
{ "mghi", 0x0d, INSTR_RI_RI },
{ "cghi", 0x0f, INSTR_RI_RI },
-#endif
{ "tmlh", 0x00, INSTR_RI_RU },
{ "tmll", 0x01, INSTR_RI_RU },
{ "brc", 0x04, INSTR_RI_UP },
@@ -785,18 +771,15 @@ static struct s390_insn opcode_a7[] = {
};
static struct s390_insn opcode_aa[] = {
-#ifdef CONFIG_64BIT
{ { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
{ "rion", 0x01, INSTR_RI_RI },
{ "tric", 0x02, INSTR_RI_RI },
{ "rioff", 0x03, INSTR_RI_RI },
{ { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_b2[] = {
-#ifdef CONFIG_64BIT
{ "stckf", 0x7c, INSTR_S_RD },
{ "lpp", 0x80, INSTR_S_RD },
{ "lcctl", 0x84, INSTR_S_RD },
@@ -819,7 +802,6 @@ static struct s390_insn opcode_b2[] = {
{ "tend", 0xf8, INSTR_S_00 },
{ "niai", 0xfa, INSTR_IE_UU },
{ { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
-#endif
{ "stidp", 0x02, INSTR_S_RD },
{ "sck", 0x04, INSTR_S_RD },
{ "stck", 0x05, INSTR_S_RD },
@@ -908,7 +890,6 @@ static struct s390_insn opcode_b2[] = {
};
static struct s390_insn opcode_b3[] = {
-#ifdef CONFIG_64BIT
{ "maylr", 0x38, INSTR_RRF_F0FF },
{ "mylr", 0x39, INSTR_RRF_F0FF },
{ "mayr", 0x3a, INSTR_RRF_F0FF },
@@ -996,7 +977,6 @@ static struct s390_insn opcode_b3[] = {
{ "qaxtr", 0xfd, INSTR_RRF_FUFF },
{ "iextr", 0xfe, INSTR_RRF_F0FR },
{ "rrxtr", 0xff, INSTR_RRF_FFRU },
-#endif
{ "lpebr", 0x00, INSTR_RRE_FF },
{ "lnebr", 0x01, INSTR_RRE_FF },
{ "ltebr", 0x02, INSTR_RRE_FF },
@@ -1091,7 +1071,6 @@ static struct s390_insn opcode_b3[] = {
};
static struct s390_insn opcode_b9[] = {
-#ifdef CONFIG_64BIT
{ "lpgr", 0x00, INSTR_RRE_RR },
{ "lngr", 0x01, INSTR_RRE_RR },
{ "ltgr", 0x02, INSTR_RRE_RR },
@@ -1204,7 +1183,6 @@ static struct s390_insn opcode_b9[] = {
{ "srk", 0xf9, INSTR_RRF_R0RR2 },
{ "alrk", 0xfa, INSTR_RRF_R0RR2 },
{ "slrk", 0xfb, INSTR_RRF_R0RR2 },
-#endif
{ "kmac", 0x1e, INSTR_RRE_RR },
{ "lrvr", 0x1f, INSTR_RRE_RR },
{ "km", 0x2e, INSTR_RRE_RR },
@@ -1224,7 +1202,6 @@ static struct s390_insn opcode_b9[] = {
};
static struct s390_insn opcode_c0[] = {
-#ifdef CONFIG_64BIT
{ "lgfi", 0x01, INSTR_RIL_RI },
{ "xihf", 0x06, INSTR_RIL_RU },
{ "xilf", 0x07, INSTR_RIL_RU },
@@ -1236,7 +1213,6 @@ static struct s390_insn opcode_c0[] = {
{ "oilf", 0x0d, INSTR_RIL_RU },
{ "llihf", 0x0e, INSTR_RIL_RU },
{ "llilf", 0x0f, INSTR_RIL_RU },
-#endif
{ "larl", 0x00, INSTR_RIL_RP },
{ "brcl", 0x04, INSTR_RIL_UP },
{ "brasl", 0x05, INSTR_RIL_RP },
@@ -1244,7 +1220,6 @@ static struct s390_insn opcode_c0[] = {
};
static struct s390_insn opcode_c2[] = {
-#ifdef CONFIG_64BIT
{ "msgfi", 0x00, INSTR_RIL_RI },
{ "msfi", 0x01, INSTR_RIL_RI },
{ "slgfi", 0x04, INSTR_RIL_RU },
@@ -1257,12 +1232,10 @@ static struct s390_insn opcode_c2[] = {
{ "cfi", 0x0d, INSTR_RIL_RI },
{ "clgfi", 0x0e, INSTR_RIL_RU },
{ "clfi", 0x0f, INSTR_RIL_RU },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_c4[] = {
-#ifdef CONFIG_64BIT
{ "llhrl", 0x02, INSTR_RIL_RP },
{ "lghrl", 0x04, INSTR_RIL_RP },
{ "lhrl", 0x05, INSTR_RIL_RP },
@@ -1274,12 +1247,10 @@ static struct s390_insn opcode_c4[] = {
{ "lrl", 0x0d, INSTR_RIL_RP },
{ { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
{ "strl", 0x0f, INSTR_RIL_RP },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_c6[] = {
-#ifdef CONFIG_64BIT
{ "exrl", 0x00, INSTR_RIL_RP },
{ "pfdrl", 0x02, INSTR_RIL_UP },
{ "cghrl", 0x04, INSTR_RIL_RP },
@@ -1292,35 +1263,29 @@ static struct s390_insn opcode_c6[] = {
{ "crl", 0x0d, INSTR_RIL_RP },
{ { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
{ "clrl", 0x0f, INSTR_RIL_RP },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_c8[] = {
-#ifdef CONFIG_64BIT
{ "mvcos", 0x00, INSTR_SSF_RRDRD },
{ "ectg", 0x01, INSTR_SSF_RRDRD },
{ "csst", 0x02, INSTR_SSF_RRDRD },
{ "lpd", 0x04, INSTR_SSF_RRDRD2 },
{ "lpdg", 0x05, INSTR_SSF_RRDRD2 },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_cc[] = {
-#ifdef CONFIG_64BIT
{ "brcth", 0x06, INSTR_RIL_RP },
{ "aih", 0x08, INSTR_RIL_RI },
{ "alsih", 0x0a, INSTR_RIL_RI },
{ { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI },
{ "cih", 0x0d, INSTR_RIL_RI },
{ "clih", 0x0f, INSTR_RIL_RI },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_e3[] = {
-#ifdef CONFIG_64BIT
{ "ltg", 0x02, INSTR_RXY_RRRD },
{ "lrag", 0x03, INSTR_RXY_RRRD },
{ "lg", 0x04, INSTR_RXY_RRRD },
@@ -1414,7 +1379,6 @@ static struct s390_insn opcode_e3[] = {
{ "clhf", 0xcf, INSTR_RXY_RRRD },
{ { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD },
{ { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD },
-#endif
{ "lrv", 0x1e, INSTR_RXY_RRRD },
{ "lrvh", 0x1f, INSTR_RXY_RRRD },
{ "strv", 0x3e, INSTR_RXY_RRRD },
@@ -1426,7 +1390,6 @@ static struct s390_insn opcode_e3[] = {
};
static struct s390_insn opcode_e5[] = {
-#ifdef CONFIG_64BIT
{ "strag", 0x02, INSTR_SSE_RDRD },
{ "mvhhi", 0x44, INSTR_SIL_RDI },
{ "mvghi", 0x48, INSTR_SIL_RDI },
@@ -1439,7 +1402,6 @@ static struct s390_insn opcode_e5[] = {
{ { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
{ { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
{ { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
-#endif
{ "lasp", 0x00, INSTR_SSE_RDRD },
{ "tprot", 0x01, INSTR_SSE_RDRD },
{ "mvcsk", 0x0e, INSTR_SSE_RDRD },
@@ -1448,7 +1410,6 @@ static struct s390_insn opcode_e5[] = {
};
static struct s390_insn opcode_e7[] = {
-#ifdef CONFIG_64BIT
{ "lcbb", 0x27, INSTR_RXE_RRRDM },
{ "vgef", 0x13, INSTR_VRV_VVRDM },
{ "vgeg", 0x12, INSTR_VRV_VVRDM },
@@ -1588,11 +1549,9 @@ static struct s390_insn opcode_e7[] = {
{ "vfsq", 0xce, INSTR_VRR_VV000MM },
{ "vfs", 0xe2, INSTR_VRR_VVV00MM },
{ "vftci", 0x4a, INSTR_VRI_VVIMM },
-#endif
};
static struct s390_insn opcode_eb[] = {
-#ifdef CONFIG_64BIT
{ "lmg", 0x04, INSTR_RSY_RRRD },
{ "srag", 0x0a, INSTR_RSY_RRRD },
{ "slag", 0x0b, INSTR_RSY_RRRD },
@@ -1659,7 +1618,6 @@ static struct s390_insn opcode_eb[] = {
{ "stric", 0x61, INSTR_RSY_RDRM },
{ "mric", 0x62, INSTR_RSY_RDRM },
{ { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
-#endif
{ "rll", 0x1d, INSTR_RSY_RRRD },
{ "mvclu", 0x8e, INSTR_RSY_RRRD },
{ "tp", 0xc0, INSTR_RSL_R0RD },
@@ -1667,7 +1625,6 @@ static struct s390_insn opcode_eb[] = {
};
static struct s390_insn opcode_ec[] = {
-#ifdef CONFIG_64BIT
{ "brxhg", 0x44, INSTR_RIE_RRP },
{ "brxlg", 0x45, INSTR_RIE_RRP },
{ { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
@@ -1701,12 +1658,10 @@ static struct s390_insn opcode_ec[] = {
{ "clgib", 0xfd, INSTR_RIS_RURDU },
{ "cib", 0xfe, INSTR_RIS_RURDI },
{ "clib", 0xff, INSTR_RIS_RURDU },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_ed[] = {
-#ifdef CONFIG_64BIT
{ "mayl", 0x38, INSTR_RXF_FRRDF },
{ "myl", 0x39, INSTR_RXF_FRRDF },
{ "may", 0x3a, INSTR_RXF_FRRDF },
@@ -1731,7 +1686,6 @@ static struct s390_insn opcode_ed[] = {
{ "czxt", 0xa9, INSTR_RSL_LRDFU },
{ "cdzt", 0xaa, INSTR_RSL_LRDFU },
{ "cxzt", 0xab, INSTR_RSL_LRDFU },
-#endif
{ "ldeb", 0x04, INSTR_RXE_FRRD },
{ "lxdb", 0x05, INSTR_RXE_FRRD },
{ "lxeb", 0x06, INSTR_RXE_FRRD },
@@ -2051,7 +2005,7 @@ void show_code(struct pt_regs *regs)
else
*ptr++ = ' ';
addr = regs->psw.addr + start - 32;
- ptr += sprintf(ptr, ONELONG, addr);
+ ptr += sprintf(ptr, "%016lx: ", addr);
if (start + opsize >= end)
break;
for (i = 0; i < opsize; i++)
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index a99852e..dc8e204 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -18,16 +18,6 @@
#include <asm/dis.h>
#include <asm/ipl.h>
-#ifndef CONFIG_64BIT
-#define LONG "%08lx "
-#define FOURLONG "%08lx %08lx %08lx %08lx\n"
-static int kstack_depth_to_print = 12;
-#else /* CONFIG_64BIT */
-#define LONG "%016lx "
-#define FOURLONG "%016lx %016lx %016lx %016lx\n"
-static int kstack_depth_to_print = 20;
-#endif /* CONFIG_64BIT */
-
/*
* For show_trace we have three different stacks to consider:
* - the panic stack which is used if the kernel stack has overflowed
@@ -115,12 +105,12 @@ void show_stack(struct task_struct *task, unsigned long *sp)
else
stack = sp;
- for (i = 0; i < kstack_depth_to_print; i++) {
+ for (i = 0; i < 20; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break;
if ((i * sizeof(long) % 32) == 0)
printk("%s ", i == 0 ? "" : "\n");
- printk(LONG, *stack++);
+ printk("%016lx ", *stack++);
}
printk("\n");
show_trace(task, sp);
@@ -128,10 +118,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
static void show_last_breaking_event(struct pt_regs *regs)
{
-#ifdef CONFIG_64BIT
printk("Last Breaking-Event-Address:\n");
printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
-#endif
}
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
@@ -155,16 +143,14 @@ void show_registers(struct pt_regs *regs)
mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
-#ifdef CONFIG_64BIT
printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
-#endif
- printk("\n%s GPRS: " FOURLONG, mode,
+ printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
- printk(" " FOURLONG,
+ printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
- printk(" " FOURLONG,
+ printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
- printk(" " FOURLONG,
+ printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
show_code(regs);
}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 4427ab7..549a73a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -64,7 +64,6 @@ asm(
" .align 4\n"
" .type savesys_ipl_nss, @function\n"
"savesys_ipl_nss:\n"
-#ifdef CONFIG_64BIT
" stmg 6,15,48(15)\n"
" lgr 14,3\n"
" sam31\n"
@@ -72,13 +71,6 @@ asm(
" sam64\n"
" lgr 2,14\n"
" lmg 6,15,48(15)\n"
-#else
- " stm 6,15,24(15)\n"
- " lr 14,3\n"
- " diag 2,14,0x8\n"
- " lr 2,14\n"
- " lm 6,15,24(15)\n"
-#endif
" br 14\n"
" .size savesys_ipl_nss, .-savesys_ipl_nss\n"
" .previous\n");
@@ -240,7 +232,6 @@ static noinline __init void detect_machine_type(void)
static __init void setup_topology(void)
{
-#ifdef CONFIG_64BIT
int max_mnest;
if (!test_facility(11))
@@ -251,7 +242,6 @@ static __init void setup_topology(void)
break;
}
topology_max_mnest = max_mnest;
-#endif
}
static void early_pgm_check_handler(void)
@@ -290,58 +280,6 @@ static noinline __init void setup_facility_list(void)
ARRAY_SIZE(S390_lowcore.stfle_fac_list));
}
-static __init void detect_mvpg(void)
-{
-#ifndef CONFIG_64BIT
- int rc;
-
- asm volatile(
- " la 0,0\n"
- " mvpg %2,%2\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
- if (!rc)
- S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
-#endif
-}
-
-static __init void detect_ieee(void)
-{
-#ifndef CONFIG_64BIT
- int rc, tmp;
-
- asm volatile(
- " efpc %1,0\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
- if (!rc)
- S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
-#endif
-}
-
-static __init void detect_csp(void)
-{
-#ifndef CONFIG_64BIT
- int rc;
-
- asm volatile(
- " la 0,0\n"
- " la 1,0\n"
- " la 2,4\n"
- " csp 0,2\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
- if (!rc)
- S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
-#endif
-}
-
static __init void detect_diag9c(void)
{
unsigned int cpu_address;
@@ -360,7 +298,6 @@ static __init void detect_diag9c(void)
static __init void detect_diag44(void)
{
-#ifdef CONFIG_64BIT
int rc;
asm volatile(
@@ -371,12 +308,10 @@ static __init void detect_diag44(void)
: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
if (!rc)
S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
-#endif
}
static __init void detect_machine_facilities(void)
{
-#ifdef CONFIG_64BIT
if (test_facility(8)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
__ctl_set_bit(0, 23);
@@ -393,7 +328,6 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129))
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
-#endif
}
static int __init cad_setup(char *str)
@@ -501,9 +435,6 @@ void __init startup_init(void)
ipl_update_parameters();
setup_boot_command_line();
create_kernel_nss();
- detect_mvpg();
- detect_ieee();
- detect_csp();
detect_diag9c();
detect_diag44();
detect_machine_facilities();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 398329b..99b44ac 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -22,27 +22,28 @@
#include <asm/irq.h>
__PT_R0 = __PT_GPRS
-__PT_R1 = __PT_GPRS + 4
-__PT_R2 = __PT_GPRS + 8
-__PT_R3 = __PT_GPRS + 12
-__PT_R4 = __PT_GPRS + 16
-__PT_R5 = __PT_GPRS + 20
-__PT_R6 = __PT_GPRS + 24
-__PT_R7 = __PT_GPRS + 28
-__PT_R8 = __PT_GPRS + 32
-__PT_R9 = __PT_GPRS + 36
-__PT_R10 = __PT_GPRS + 40
-__PT_R11 = __PT_GPRS + 44
-__PT_R12 = __PT_GPRS + 48
-__PT_R13 = __PT_GPRS + 52
-__PT_R14 = __PT_GPRS + 56
-__PT_R15 = __PT_GPRS + 60
+__PT_R1 = __PT_GPRS + 8
+__PT_R2 = __PT_GPRS + 16
+__PT_R3 = __PT_GPRS + 24
+__PT_R4 = __PT_GPRS + 32
+__PT_R5 = __PT_GPRS + 40
+__PT_R6 = __PT_GPRS + 48
+__PT_R7 = __PT_GPRS + 56
+__PT_R8 = __PT_GPRS + 64
+__PT_R9 = __PT_GPRS + 72
+__PT_R10 = __PT_GPRS + 80
+__PT_R11 = __PT_GPRS + 88
+__PT_R12 = __PT_GPRS + 96
+__PT_R13 = __PT_GPRS + 104
+__PT_R14 = __PT_GPRS + 112
+__PT_R15 = __PT_GPRS + 120
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
-STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
-_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
+_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
+ _TIF_UPROBE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
@@ -53,16 +54,14 @@ _PIF_WORK = (_PIF_PER_TRAP)
.macro TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
- l %r1,BASED(.Lc_hardirqs_on)
- basr %r14,%r1 # call trace_hardirqs_on_caller
+ brasl %r14,trace_hardirqs_on_caller
#endif
.endm
.macro TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
- l %r1,BASED(.Lc_hardirqs_off)
- basr %r14,%r1 # call trace_hardirqs_off_caller
+ brasl %r14,trace_hardirqs_off_caller
#endif
.endm
@@ -70,73 +69,104 @@ _PIF_WORK = (_PIF_PER_TRAP)
#ifdef CONFIG_LOCKDEP
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jz .+10
- l %r1,BASED(.Lc_lockdep_sys_exit)
- basr %r14,%r1 # call lockdep_sys_exit
+ brasl %r14,lockdep_sys_exit
+#endif
+ .endm
+
+ .macro LPP newpp
+#if IS_ENABLED(CONFIG_KVM)
+ tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
+ jz .+8
+ .insn s,0xb2800000,\newpp
+#endif
+ .endm
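# LPP is the "load program parameter" instruction (opcode 0xb280), used in
# this file to tag samples with a host or guest id around SIE; it is
# hand-encoded via .insn, presumably so that assemblers without the
# mnemonic can still build the file. It only runs when MACHINE_FLAG_LPP
# is set.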
+
+ .macro HANDLE_SIE_INTERCEPT scratch,reason
+#if IS_ENABLED(CONFIG_KVM)
+ tmhh %r8,0x0001 # interrupting from user ?
+ jnz .+62
+ lgr \scratch,%r9
+ slg \scratch,BASED(.Lsie_critical)
+ clg \scratch,BASED(.Lsie_critical_length)
+ .if \reason==1
+ # Some program interrupts are suppressing (e.g. protection).
+ # We must also check the instruction after SIE in that case.
+ # do_protection_exception will rewind to .Lrewind_pad
+ jh .+42
+ .else
+ jhe .+42
+ .endif
+ lg %r14,__SF_EMPTY(%r15) # get control block pointer
+ LPP __SF_EMPTY+16(%r15) # set host id
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
+ mvi __SF_EMPTY+31(%r15),\reason # set exit reason
#endif
.endm
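# The \reason codes used with HANDLE_SIE_INTERCEPT in this file are
# 1 = program check, 2 = I/O interrupt, 3 = external interrupt and
# 4 = machine check. The byte stored at __SF_EMPTY+31 lands in the
# exit-reason slot (__SF_EMPTY+24) that sie_exit later returns in %r2.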
.macro CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
tml %r15,\stacksize - CONFIG_STACK_GUARD
- la %r14,\savearea
+ lghi %r14,\savearea
jz stack_overflow
#endif
.endm
.macro SWITCH_ASYNC savearea,stack,shift
- tmh %r8,0x0001 # interrupting from user ?
+ tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
- lr %r14,%r9
- sl %r14,BASED(.Lc_critical_start)
- cl %r14,BASED(.Lc_critical_length)
+ lgr %r14,%r9
+ slg %r14,BASED(.Lcritical_start)
+ clg %r14,BASED(.Lcritical_length)
jhe 0f
- la %r11,\savearea # inside critical section, do cleanup
- bras %r14,cleanup_critical
- tmh %r8,0x0001 # retest problem state after cleanup
+ lghi %r11,\savearea # inside critical section, do cleanup
+ brasl %r14,cleanup_critical
+ tmhh %r8,0x0001 # retest problem state after cleanup
jnz 1f
-0: l %r14,\stack # are we already on the target stack?
- slr %r14,%r15
- sra %r14,\shift
+0: lg %r14,\stack # are we already on the target stack?
+ slgr %r14,%r15
+ srag %r14,%r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
- ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
-1: l %r15,\stack # load target stack
+1: lg %r15,\stack # load target stack
2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
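# SWITCH_ASYNC: if the interrupt hit kernel code inside the
# .L__critical_start/.L__critical_end region, cleanup_critical first moves
# the interrupted state to a consistent point; the macro then stays on the
# current stack and carves out a new frame if %r15 already lies within
# \stack, otherwise it loads \stack. Either way %r11 ends up pointing to
# the pt_regs area.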
- .macro ADD64 high,low,timer
- al \high,\timer
- al \low,4+\timer
- brc 12,.+8
- ahi \high,1
- .endm
-
- .macro SUB64 high,low,timer
- sl \high,\timer
- sl \low,4+\timer
- brc 3,.+8
- ahi \high,-1
+ .macro UPDATE_VTIME scratch,enter_timer
+ lg \scratch,__LC_EXIT_TIMER
+ slg \scratch,\enter_timer
+ alg \scratch,__LC_USER_TIMER
+ stg \scratch,__LC_USER_TIMER
+ lg \scratch,__LC_LAST_UPDATE_TIMER
+ slg \scratch,__LC_EXIT_TIMER
+ alg \scratch,__LC_SYSTEM_TIMER
+ stg \scratch,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
.endm
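# With 64-bit slg/alg arithmetic a single scratch register is sufficient;
# the ADD64/SUB64 helpers above, which propagated carries across two
# 32-bit halves, are no longer needed.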
- .macro UPDATE_VTIME high,low,enter_timer
- lm \high,\low,__LC_EXIT_TIMER
- SUB64 \high,\low,\enter_timer
- ADD64 \high,\low,__LC_USER_TIMER
- stm \high,\low,__LC_USER_TIMER
- lm \high,\low,__LC_LAST_UPDATE_TIMER
- SUB64 \high,\low,__LC_EXIT_TIMER
- ADD64 \high,\low,__LC_SYSTEM_TIMER
- stm \high,\low,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
+ .macro LAST_BREAK scratch
+ srag \scratch,%r10,23
+ jz .+10
+ stg %r10,__TI_last_break(%r12)
.endm
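# LAST_BREAK saves the last breaking-event address (%r10, loaded earlier
# from __LC_LAST_BREAK) into thread_info; the srag by 23 skips the store
# for addresses below 8 MB, apparently to ignore uninteresting low-address
# samples.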
.macro REENABLE_IRQS
- st %r8,__LC_RETURN_PSW
+ stg %r8,__LC_RETURN_PSW
ni __LC_RETURN_PSW,0xbf
ssm __LC_RETURN_PSW
.endm
+ .macro STCK savearea
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+ .insn s,0xb27c0000,\savearea # store clock fast
+#else
+ .insn s,0xb2050000,\savearea # store clock
+#endif
+ .endm
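# STCK wraps the two store-clock variants: STCKF ("store clock fast",
# opcode 0xb27c) when building for z9-109 or newer, plain STCK (opcode
# 0xb205) otherwise; both are emitted via .insn, presumably for assemblers
# that lack the newer mnemonic.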
+
.section .kprobes.text, "ax"
/*
@@ -147,19 +177,19 @@ _PIF_WORK = (_PIF_PER_TRAP)
* gpr2 = prev
*/
ENTRY(__switch_to)
- stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
- st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
- l %r4,__THREAD_info(%r2) # get thread_info of prev
- l %r5,__THREAD_info(%r3) # get thread_info of next
- lr %r15,%r5
- ahi %r15,STACK_INIT # end of kernel stack of next
- st %r3,__LC_CURRENT # store task struct of next
- st %r5,__LC_THREAD_INFO # store thread info of next
- st %r15,__LC_KERNEL_STACK # store end of kernel stack
+ stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
+ stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
+ lg %r4,__THREAD_info(%r2) # get thread_info of prev
+ lg %r5,__THREAD_info(%r3) # get thread_info of next
+ lgr %r15,%r5
+ aghi %r15,STACK_INIT # end of kernel stack of next
+ stg %r3,__LC_CURRENT # store task struct of next
+ stg %r5,__LC_THREAD_INFO # store thread info of next
+ stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
- mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
- l %r15,__THREAD_ksp(%r3) # load kernel stack of next
- lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
+ mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
+ lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
+ lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
br %r14
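# __LC_CURRENT_PID is an 8-byte slot here; storing the 4-byte pid at
# offset +4 keeps the value correct when read as a 64-bit big-endian
# integer.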
.L__critical_start:
@@ -170,75 +200,83 @@ ENTRY(__switch_to)
ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
-.Lsysc_stm:
- stm %r8,%r15,__LC_SAVE_AREA_SYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lhi %r14,_PIF_SYSCALL
+.Lsysc_stmg:
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ lghi %r14,_PIF_SYSCALL
.Lsysc_per:
- l %r15,__LC_KERNEL_STACK
+ lg %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
.Lsysc_vtime:
- UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
- mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW
+ UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
+ LAST_BREAK %r13
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
- st %r14,__PT_FLAGS(%r11)
+ stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
- l %r10,__TI_sysc_table(%r12) # 31 bit system call table
- lh %r8,__PT_INT_CODE+2(%r11)
- sla %r8,2 # shift and test for svc0
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
+ llgh %r8,__PT_INT_CODE+2(%r11)
+ slag %r8,%r8,2 # shift and test for svc 0
jnz .Lsysc_nr_ok
# svc 0: system call number in %r1
- cl %r1,BASED(.Lnr_syscalls)
+ llgfr %r1,%r1 # clear high word in r1
+ cghi %r1,NR_syscalls
jnl .Lsysc_nr_ok
sth %r1,__PT_INT_CODE+2(%r11)
- lr %r8,%r1
- sla %r8,2
+ slag %r8,%r1,2
.Lsysc_nr_ok:
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- st %r2,__PT_ORIG_GPR2(%r11)
- st %r7,STACK_FRAME_OVERHEAD(%r15)
- l %r9,0(%r8,%r10) # get system call addr.
- tm __TI_flags+3(%r12),_TIF_TRACE
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ stg %r2,__PT_ORIG_GPR2(%r11)
+ stg %r7,STACK_FRAME_OVERHEAD(%r15)
+ lgf %r9,0(%r8,%r10) # get system call addr.
+ tm __TI_flags+7(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
basr %r14,%r9 # call sys_xxxx
- st %r2,__PT_R2(%r11) # store return value
+ stg %r2,__PT_R2(%r11) # store return value
.Lsysc_return:
LOCKDEP_SYS_EXIT
.Lsysc_tif:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lsysc_restore
- tm __PT_FLAGS+3(%r11),_PIF_WORK
+ tm __PT_FLAGS+7(%r11),_PIF_WORK
jnz .Lsysc_work
- tm __TI_flags+3(%r12),_TIF_WORK
- jnz .Lsysc_work # check for thread work
- tm __LC_CPU_FLAGS+3,_CIF_WORK
+ tm __TI_flags+7(%r12),_TIF_WORK
+ jnz .Lsysc_work # check for work
+ tm __LC_CPU_FLAGS+7,_CIF_WORK
jnz .Lsysc_work
.Lsysc_restore:
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER
- lm %r0,%r15,__PT_R0(%r11)
- lpsw __LC_RETURN_PSW
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_PSW
.Lsysc_done:
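# Exit path note: %r14 is loaded with the vdso per-cpu pointer before the
# registers are restored so the exit timer can be mirrored into the vdso
# ECTG area (__VDSO_ECTG_BASE) after stpt; only %r11-%r15 remain to be
# reloaded before lpswe installs the user PSW.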
#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
- tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
+ tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jo .Lsysc_mcck_pending
- tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo .Lsysc_reschedule
- tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP
+#ifdef CONFIG_UPROBES
+ tm __TI_flags+7(%r12),_TIF_UPROBE
+ jo .Lsysc_uprobe_notify
+#endif
+ tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
jo .Lsysc_singlestep
- tm __TI_flags+3(%r12),_TIF_SIGPENDING
+ tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo .Lsysc_sigpending
- tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
+ tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo .Lsysc_notify_resume
- tm __LC_CPU_FLAGS+3,_CIF_ASCE
+ tm __LC_CPU_FLAGS+7,_CIF_ASCE
jo .Lsysc_uaccess
j .Lsysc_return # beware of critical section cleanup
@@ -246,109 +284,109 @@ ENTRY(system_call)
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
- l %r1,BASED(.Lc_schedule)
- la %r14,BASED(.Lsysc_return)
- br %r1 # call schedule
+ larl %r14,.Lsysc_return
+ jg schedule
#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
- l %r1,BASED(.Lc_handle_mcck)
- la %r14,BASED(.Lsysc_return)
- br %r1 # TIF bit will be cleared by handler
+ larl %r14,.Lsysc_return
+ jg s390_handle_mcck # TIF bit will be cleared by handler
#
# _CIF_ASCE is set, load user space asce
#
.Lsysc_uaccess:
- ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
- lctl %c1,%c1,__LC_USER_ASCE # load primary asce
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j .Lsysc_return
#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
- lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Lc_do_signal)
- basr %r14,%r1 # call do_signal
- tm __PT_FLAGS+3(%r11),_PIF_SYSCALL
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_signal
+ tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
jno .Lsysc_return
- lm %r2,%r7,__PT_R2(%r11) # load svc arguments
- l %r10,__TI_sysc_table(%r12) # 31 bit system call table
- xr %r8,%r8 # svc 0 returns -ENOSYS
- clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
+ lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
+ lghi %r8,0 # svc 0 returns -ENOSYS
+ llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
+ cghi %r1,NR_syscalls
jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
- lh %r8,__PT_INT_CODE+2(%r11) # load new svc number
- sla %r8,2
+ slag %r8,%r1,2
j .Lsysc_nr_ok # restart svc
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
- lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Lc_do_notify_resume)
- la %r14,BASED(.Lsysc_return)
- br %r1 # call do_notify_resume
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg do_notify_resume
+
+#
+# _TIF_UPROBE is set, call uprobe_notify_resume
+#
+#ifdef CONFIG_UPROBES
+.Lsysc_uprobe_notify:
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg uprobe_notify_resume
+#endif
#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
- ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
- lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Lc_do_per_trap)
- la %r14,BASED(.Lsysc_return)
- br %r1 # call do_per_trap
+ ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg do_per_trap
#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
- l %r1,BASED(.Lc_trace_enter)
- lr %r2,%r11 # pass pointer to pt_regs
+ lgr %r2,%r11 # pass pointer to pt_regs
la %r3,0
- xr %r0,%r0
- icm %r0,3,__PT_INT_CODE+2(%r11)
- st %r0,__PT_R2(%r11)
- basr %r14,%r1 # call do_syscall_trace_enter
- cl %r2,BASED(.Lnr_syscalls)
- jnl .Lsysc_tracenogo
- lr %r8,%r2
- sll %r8,2
- l %r9,0(%r8,%r10)
+ llgh %r0,__PT_INT_CODE+2(%r11)
+ stg %r0,__PT_R2(%r11)
+ brasl %r14,do_syscall_trace_enter
+ lghi %r0,NR_syscalls
+ clgr %r0,%r2
+ jnh .Lsysc_tracenogo
+ sllg %r8,%r2,2
+ lgf %r9,0(%r8,%r10)
.Lsysc_tracego:
- lm %r3,%r7,__PT_R3(%r11)
- st %r7,STACK_FRAME_OVERHEAD(%r15)
- l %r2,__PT_ORIG_GPR2(%r11)
+ lmg %r3,%r7,__PT_R3(%r11)
+ stg %r7,STACK_FRAME_OVERHEAD(%r15)
+ lg %r2,__PT_ORIG_GPR2(%r11)
basr %r14,%r9 # call sys_xxx
- st %r2,__PT_R2(%r11) # store return value
+ stg %r2,__PT_R2(%r11) # store return value
.Lsysc_tracenogo:
- tm __TI_flags+3(%r12),_TIF_TRACE
+ tm __TI_flags+7(%r12),_TIF_TRACE
jz .Lsysc_return
- l %r1,BASED(.Lc_trace_exit)
- lr %r2,%r11 # pass pointer to pt_regs
- la %r14,BASED(.Lsysc_return)
- br %r1 # call do_syscall_trace_exit
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg do_syscall_trace_exit
#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- l %r1,BASED(.Lc_schedule_tail)
- basr %r14,%r1 # call schedule_tail
+ lg %r12,__LC_THREAD_INFO
+ brasl %r14,schedule_tail
TRACE_IRQS_ON
ssm __LC_SVC_NEW_PSW # reenable interrupts
tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
jne .Lsysc_tracenogo
# it's a kernel thread
- lm %r9,%r10,__PT_R9(%r11) # load gprs
+ lmg %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
basr %r14,%r9
@@ -360,46 +398,54 @@ ENTRY(kernel_thread_starter)
ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
- stm %r8,%r15,__LC_SAVE_AREA_SYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_PGM_OLD_PSW
- tmh %r8,0x0001 # test problem state bit
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_PGM_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,1
+ tmhh %r8,0x0001 # test problem state bit
jnz 1f # -> fault in user space
- tmh %r8,0x4000 # PER bit set in old PSW ?
+ tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 0f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
- ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
-1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
- l %r15,__LC_KERNEL_STACK
+1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
+ LAST_BREAK %r14
+ lg %r15,__LC_KERNEL_STACK
+ lg %r14,__TI_task(%r12)
+ lghi %r13,__LC_PGM_TDB
+ tm __LC_PGM_ILC+2,0x02 # check for transaction abort
+ jz 2f
+ mvc __THREAD_trap_tdb(256,%r14),0(%r13)
2: la %r11,STACK_FRAME_OVERHEAD(%r15)
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
- stm %r8,%r9,__PT_PSW(%r11)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
- mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
+ mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jz 0f
- l %r1,__TI_task(%r12)
- tmh %r8,0x0001 # kernel per event ?
+ tmhh %r8,0x0001 # kernel per event ?
jz .Lpgm_kprobe
- oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP
- mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
- mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE
- mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
+ oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
+ mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
+ mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
+ mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
0: REENABLE_IRQS
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Lc_jump_table)
- la %r10,0x7f
- n %r10,__PT_INT_CODE(%r11)
- je .Lsysc_return
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ larl %r1,pgm_check_table
+ llgh %r10,__PT_INT_CODE+2(%r11)
+ nill %r10,0x007f
sll %r10,2
- l %r1,0(%r10,%r1) # load address of handler routine
- lr %r2,%r11 # pass pointer to pt_regs
+ je .Lsysc_return
+ lgf %r1,0(%r10,%r1) # load address of handler routine
+ lgr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler
j .Lsysc_return
@@ -408,54 +454,55 @@ ENTRY(pgm_check_handler)
#
.Lpgm_kprobe:
REENABLE_IRQS
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Lc_do_per_trap)
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call do_per_trap
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_per_trap
j .Lsysc_return
#
# single stepped system call
#
.Lpgm_svcper:
- mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
- mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per)
- lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
- lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
+ mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
+ larl %r14,.Lsysc_per
+ stg %r14,__LC_RETURN_PSW+8
+ lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
+ lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
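# The single-step path builds a return PSW from the interrupt-enabled SVC
# new PSW with .Lsysc_per as the target address, so the lpswe re-enters
# the system call path with irqs enabled and %r14 preloaded with
# _PIF_SYSCALL | _PIF_PER_TRAP.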
/*
* IO interrupt handler routine
*/
-
ENTRY(io_int_handler)
- stck __LC_INT_CLOCK
+ STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- stm %r8,%r15,__LC_SAVE_AREA_ASYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_IO_OLD_PSW
- tmh %r8,0x0001 # interrupting from user ?
+ stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_IO_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,2
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+ tmhh %r8,0x0001 # interrupting from user?
jz .Lio_skip
- UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+ UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+ LAST_BREAK %r14
.Lio_skip:
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
- stm %r8,%r9,__PT_PSW(%r11)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
- l %r1,BASED(.Lc_do_IRQ)
- lr %r2,%r11 # pass pointer to pt_regs
- lhi %r3,IO_INTERRUPT
+ lgr %r2,%r11 # pass pointer to pt_regs
+ lghi %r3,IO_INTERRUPT
tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
jz .Lio_call
- lhi %r3,THIN_INTERRUPT
+ lghi %r3,THIN_INTERRUPT
.Lio_call:
- basr %r14,%r1 # call do_IRQ
- tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR
+ brasl %r14,do_IRQ
+ tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
jz .Lio_return
tpi 0
jz .Lio_return
@@ -465,21 +512,26 @@ ENTRY(io_int_handler)
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON
.Lio_tif:
- tm __TI_flags+3(%r12),_TIF_WORK
+ tm __TI_flags+7(%r12),_TIF_WORK
jnz .Lio_work # there is work to do (signals etc.)
- tm __LC_CPU_FLAGS+3,_CIF_WORK
+ tm __LC_CPU_FLAGS+7,_CIF_WORK
jnz .Lio_work
.Lio_restore:
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER
- lm %r0,%r15,__PT_R0(%r11)
- lpsw __LC_RETURN_PSW
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_PSW
.Lio_done:
#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
-# 2) if we return to kernel code and preemptive scheduling is enabled check
+# 2) if we return to kernel code and kvm is enabled check if we need to
+# modify the psw to leave SIE
+# 3) if we return to kernel code and preemptive scheduling is enabled check
# the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
@@ -489,21 +541,20 @@ ENTRY(io_int_handler)
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r12)
- jnz .Lio_restore # preemption disabled
- tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
+ jnz .Lio_restore # preemption is disabled
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jno .Lio_restore
# switch to kernel stack
- l %r1,__PT_R15(%r11)
- ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ lg %r1,__PT_R15(%r11)
+ aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
- lr %r15,%r1
+ lgr %r15,%r1
# TRACE_IRQS_ON already done at .Lio_return, call
# TRACE_IRQS_OFF to keep things symmetrical
TRACE_IRQS_OFF
- l %r1,BASED(.Lc_preempt_irq)
- basr %r14,%r1 # call preempt_schedule_irq
+ brasl %r14,preempt_schedule_irq
j .Lio_return
#else
j .Lio_restore
@@ -513,25 +564,25 @@ ENTRY(io_int_handler)
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
- l %r1,__LC_KERNEL_STACK
+ lg %r1,__LC_KERNEL_STACK
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
- lr %r15,%r1
+ lgr %r15,%r1
#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
- tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING
+ tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jo .Lio_mcck_pending
- tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo .Lio_reschedule
- tm __TI_flags+3(%r12),_TIF_SIGPENDING
+ tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo .Lio_sigpending
- tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
+ tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo .Lio_notify_resume
- tm __LC_CPU_FLAGS+3,_CIF_ASCE
+ tm __LC_CPU_FLAGS+7,_CIF_ASCE
jo .Lio_uaccess
j .Lio_return # beware of critical section cleanup
@@ -540,8 +591,7 @@ ENTRY(io_int_handler)
#
.Lio_mcck_pending:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_handle_mcck)
- basr %r14,%r1 # TIF bit will be cleared by handler
+ brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
TRACE_IRQS_OFF
j .Lio_return
@@ -549,8 +599,8 @@ ENTRY(io_int_handler)
# _CIF_ASCE is set, load user space asce
#
.Lio_uaccess:
- ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
- lctl %c1,%c1,__LC_USER_ASCE # load primary asce
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j .Lio_return
#
@@ -558,35 +608,32 @@ ENTRY(io_int_handler)
#
.Lio_reschedule:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_schedule)
ssm __LC_SVC_NEW_PSW # reenable interrupts
- basr %r14,%r1 # call scheduler
+ brasl %r14,schedule # call scheduler
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
#
-# _TIF_SIGPENDING is set, call do_signal
+# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_do_signal)
ssm __LC_SVC_NEW_PSW # reenable interrupts
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call do_signal
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_signal
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
#
-# _TIF_SIGPENDING is set, call do_signal
+# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_do_notify_resume)
ssm __LC_SVC_NEW_PSW # reenable interrupts
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call do_notify_resume
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_notify_resume
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
@@ -594,45 +641,47 @@ ENTRY(io_int_handler)
/*
* External interrupt handler routine
*/
-
ENTRY(ext_int_handler)
- stck __LC_INT_CLOCK
+ STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- stm %r8,%r15,__LC_SAVE_AREA_ASYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_EXT_OLD_PSW
- tmh %r8,0x0001 # interrupting from user ?
+ stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_EXT_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,3
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+ tmhh %r8,0x0001 # interrupting from user ?
jz .Lext_skip
- UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+ UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+ LAST_BREAK %r14
.Lext_skip:
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
- stm %r8,%r9,__PT_PSW(%r11)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
+ lghi %r1,__LC_EXT_PARAMS2
mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
+ mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF
- l %r1,BASED(.Lc_do_IRQ)
- lr %r2,%r11 # pass pointer to pt_regs
- lhi %r3,EXT_INTERRUPT
- basr %r14,%r1 # call do_IRQ
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ lghi %r3,EXT_INTERRUPT
+ brasl %r14,do_IRQ
j .Lio_return
/*
* Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
*/
ENTRY(psw_idle)
- st %r3,__SF_EMPTY(%r15)
- basr %r1,0
- la %r1,.Lpsw_idle_lpsw+4-.(%r1)
- st %r1,__SF_EMPTY+4(%r15)
- oi __SF_EMPTY+4(%r15),0x80
- stck __CLOCK_IDLE_ENTER(%r2)
+ stg %r3,__SF_EMPTY(%r15)
+ larl %r1,.Lpsw_idle_lpsw+4
+ stg %r1,__SF_EMPTY+8(%r15)
+ STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
- lpsw __SF_EMPTY(%r15)
+ lpswe __SF_EMPTY(%r15)
br %r14
.Lpsw_idle_end:
@@ -641,17 +690,19 @@ ENTRY(psw_idle)
/*
* Machine check handler routines
*/
-
ENTRY(mcck_int_handler)
- stck __LC_MCCK_CLOCK
- spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
- lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_MCK_OLD_PSW
+ STCK __LC_MCCK_CLOCK
+ la %r1,4095 # revalidate r1
+ spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_MCK_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,4
tm __LC_MCCK_CODE,0x80 # system damage?
jo .Lmcck_panic # yes -> rest of mcck code invalid
- la %r14,__LC_CPU_TIMER_SAVE_AREA
+ lghi %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
jo 3f
@@ -669,76 +720,76 @@ ENTRY(mcck_int_handler)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
jno .Lmcck_panic # no -> skip cleanup critical
+ SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
tm %r8,0x0001 # interrupting from user ?
jz .Lmcck_skip
- UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
+ UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
+ LAST_BREAK %r14
.Lmcck_skip:
- SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
- stm %r8,%r9,__PT_PSW(%r11)
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Lc_do_machine_check)
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call s390_do_machine_check
+ lghi %r14,__LC_GPREGS_SAVE_AREA+64
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),0(%r14)
+ stmg %r8,%r9,__PT_PSW(%r11)
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,s390_do_machine_check
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lmcck_return
- l %r1,__LC_KERNEL_STACK # switch to kernel stack
+ lg %r1,__LC_KERNEL_STACK # switch to kernel stack
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- lr %r15,%r1
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+ la %r11,STACK_FRAME_OVERHEAD(%r1)
+ lgr %r15,%r1
ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
- tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
+ tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jno .Lmcck_return
TRACE_IRQS_OFF
- l %r1,BASED(.Lc_handle_mcck)
- basr %r14,%r1 # call s390_handle_mcck
+ brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
.Lmcck_return:
- mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
- lm %r0,%r15,__PT_R0(%r11)
stpt __LC_EXIT_TIMER
- lpsw __LC_RETURN_MCCK_PSW
-0: lm %r0,%r15,__PT_R0(%r11)
- lpsw __LC_RETURN_MCCK_PSW
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+0: lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_MCCK_PSW
.Lmcck_panic:
- l %r14,__LC_PANIC_STACK
- slr %r14,%r15
- sra %r14,PAGE_SHIFT
+ lg %r14,__LC_PANIC_STACK
+ slgr %r14,%r15
+ srag %r14,%r14,PAGE_SHIFT
jz 0f
- l %r15,__LC_PANIC_STACK
- j .Lmcck_skip
-0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ lg %r15,__LC_PANIC_STACK
+0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j .Lmcck_skip
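# .Lmcck_panic switches to the panic stack unless %r15 already points into
# it, then re-enters the common save sequence at .Lmcck_skip.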
#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
- st %r15,__LC_SAVE_AREA_RESTART
- l %r15,__LC_RESTART_STACK
- ahi %r15,-__PT_SIZE # create pt_regs on stack
+ stg %r15,__LC_SAVE_AREA_RESTART
+ lg %r15,__LC_RESTART_STACK
+ aghi %r15,-__PT_SIZE # create pt_regs on stack
xc 0(__PT_SIZE,%r15),0(%r15)
- stm %r0,%r14,__PT_R0(%r15)
- mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
- mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
- ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
+ stmg %r0,%r14,__PT_R0(%r15)
+ mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
+ mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
+ aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
- l %r1,__LC_RESTART_FN # load fn, parm & source cpu
- l %r2,__LC_RESTART_DATA
- l %r3,__LC_RESTART_SOURCE
- ltr %r3,%r3 # test source cpu address
+ lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
+ lg %r2,__LC_RESTART_DATA
+ lg %r3,__LC_RESTART_SOURCE
+ ltgr %r3,%r3 # test source cpu address
jm 1f # negative -> skip source stop
0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
brc 10,0b # wait for status stored
1: basr %r14,%r1 # call function
stap __SF_EMPTY(%r15) # store cpu address
- lh %r3,__SF_EMPTY(%r15)
+ llgh %r3,__SF_EMPTY(%r15)
2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
brc 2,2b
3: j 3b
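# Once the restart function returns, the handler stops the current cpu
# with sigp stop, retrying while the order is rejected as busy; the jump
# at 3: simply parks the cpu should the stop not take effect immediately.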
@@ -752,215 +803,257 @@ ENTRY(restart_int_handler)
* Set up a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
- l %r15,__LC_PANIC_STACK # change to panic stack
+ lg %r15,__LC_PANIC_STACK # change to panic stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
- stm %r0,%r7,__PT_R0(%r11)
- stm %r8,%r9,__PT_PSW(%r11)
- mvc __PT_R8(32,%r11),0(%r14)
- l %r1,BASED(1f)
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- lr %r2,%r11 # pass pointer to pt_regs
- br %r1 # branch to kernel_stack_overflow
-1: .long kernel_stack_overflow
+ stmg %r0,%r7,__PT_R0(%r11)
+ stmg %r8,%r9,__PT_PSW(%r11)
+ mvc __PT_R8(64,%r11),0(%r14)
+ stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ jg kernel_stack_overflow
#endif
+ .align 8
.Lcleanup_table:
- .long system_call + 0x80000000
- .long .Lsysc_do_svc + 0x80000000
- .long .Lsysc_tif + 0x80000000
- .long .Lsysc_restore + 0x80000000
- .long .Lsysc_done + 0x80000000
- .long .Lio_tif + 0x80000000
- .long .Lio_restore + 0x80000000
- .long .Lio_done + 0x80000000
- .long psw_idle + 0x80000000
- .long .Lpsw_idle_end + 0x80000000
+ .quad system_call
+ .quad .Lsysc_do_svc
+ .quad .Lsysc_tif
+ .quad .Lsysc_restore
+ .quad .Lsysc_done
+ .quad .Lio_tif
+ .quad .Lio_restore
+ .quad .Lio_done
+ .quad psw_idle
+ .quad .Lpsw_idle_end
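# cleanup_critical below compares the interrupted address in %r9 against
# consecutive .Lcleanup_table entries to identify the critical region that
# was hit; the entries are 8-byte .quad values now that addresses are
# 64 bit.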
cleanup_critical:
- cl %r9,BASED(.Lcleanup_table) # system_call
+ clg %r9,BASED(.Lcleanup_table) # system_call
jl 0f
- cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc
+ clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
jl .Lcleanup_system_call
- cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif
+ clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
jl 0f
- cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore
+ clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
jl .Lcleanup_sysc_tif
- cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done
+ clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
jl .Lcleanup_sysc_restore
- cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif
+ clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
jl 0f
- cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore
+ clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
jl .Lcleanup_io_tif
- cl %r9,BASED(.Lcleanup_table+28) # .Lio_done
+ clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
jl .Lcleanup_io_restore
- cl %r9,BASED(.Lcleanup_table+32) # psw_idle
+ clg %r9,BASED(.Lcleanup_table+64) # psw_idle
jl 0f
- cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end
+ clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
jl .Lcleanup_idle
0: br %r14
+
.Lcleanup_system_call:
# check if stpt has been executed
- cl %r9,BASED(.Lcleanup_system_call_insn)
+ clg %r9,BASED(.Lcleanup_system_call_insn)
jh 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
- chi %r11,__LC_SAVE_AREA_ASYNC
+ cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: # check if stm has been executed
- cl %r9,BASED(.Lcleanup_system_call_insn+4)
+0: # check if stmg has been executed
+ clg %r9,BASED(.Lcleanup_system_call_insn+8)
jh 0f
- mvc __LC_SAVE_AREA_SYNC(32),0(%r11)
-0: # set up saved registers r12, and r13
- st %r12,16(%r11) # r12 thread-info pointer
- st %r13,20(%r11) # r13 literal-pool pointer
- # check if the user time calculation has been done
- cl %r9,BASED(.Lcleanup_system_call_insn+8)
+ mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
+0: # check if base register setup + TIF bit load has been done
+ clg %r9,BASED(.Lcleanup_system_call_insn+16)
+ jhe 0f
+ # set up saved registers r10 and r12
+ stg %r10,16(%r11) # r10 last break
+ stg %r12,32(%r11) # r12 thread-info pointer
+0: # check if the user time update has been done
+ clg %r9,BASED(.Lcleanup_system_call_insn+24)
jh 0f
- l %r10,__LC_EXIT_TIMER
- l %r15,__LC_EXIT_TIMER+4
- SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER
- ADD64 %r10,%r15,__LC_USER_TIMER
- st %r10,__LC_USER_TIMER
- st %r15,__LC_USER_TIMER+4
-0: # check if the system time calculation has been done
- cl %r9,BASED(.Lcleanup_system_call_insn+12)
+ lg %r15,__LC_EXIT_TIMER
+ slg %r15,__LC_SYNC_ENTER_TIMER
+ alg %r15,__LC_USER_TIMER
+ stg %r15,__LC_USER_TIMER
+0: # check if the system time update has been done
+ clg %r9,BASED(.Lcleanup_system_call_insn+32)
jh 0f
- l %r10,__LC_LAST_UPDATE_TIMER
- l %r15,__LC_LAST_UPDATE_TIMER+4
- SUB64 %r10,%r15,__LC_EXIT_TIMER
- ADD64 %r10,%r15,__LC_SYSTEM_TIMER
- st %r10,__LC_SYSTEM_TIMER
- st %r15,__LC_SYSTEM_TIMER+4
+ lg %r15,__LC_LAST_UPDATE_TIMER
+ slg %r15,__LC_EXIT_TIMER
+ alg %r15,__LC_SYSTEM_TIMER
+ stg %r15,__LC_SYSTEM_TIMER
0: # update accounting time stamp
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- # set up saved register 11
- l %r15,__LC_KERNEL_STACK
+ # do LAST_BREAK
+ lg %r9,16(%r11)
+ srag %r9,%r9,23
+ jz 0f
+ mvc __TI_last_break(8,%r12),16(%r11)
+0: # set up saved register r11
+ lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
- st %r9,12(%r11) # r11 pt_regs pointer
+ stg %r9,24(%r11) # r11 pt_regs pointer
# fill pt_regs
- mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
- stm %r0,%r7,__PT_R0(%r9)
- mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
+ mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
+ stmg %r0,%r7,__PT_R0(%r9)
+ mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
- xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9)
- mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL
- # setup saved register 15
- st %r15,28(%r11) # r15 stack pointer
+ xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
+ mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
+ # setup saved register r15
+ stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
- l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000
+ larl %r9,.Lsysc_do_svc
br %r14
.Lcleanup_system_call_insn:
- .long system_call + 0x80000000
- .long .Lsysc_stm + 0x80000000
- .long .Lsysc_vtime + 0x80000000 + 36
- .long .Lsysc_vtime + 0x80000000 + 76
+ .quad system_call
+ .quad .Lsysc_stmg
+ .quad .Lsysc_per
+ .quad .Lsysc_vtime+18
+ .quad .Lsysc_vtime+42
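# The five entries above are the retry points tested by
# .Lcleanup_system_call: past the stpt, past the stmg, past the base/TIF
# setup at .Lsysc_per, and past the user and system time updates inside
# .Lsysc_vtime.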
.Lcleanup_sysc_tif:
- l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000
+ larl %r9,.Lsysc_tif
br %r14
.Lcleanup_sysc_restore:
- cl %r9,BASED(.Lcleanup_sysc_restore_insn)
- jhe 0f
- l %r9,12(%r11) # get saved pointer to pt_regs
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
- mvc 0(32,%r11),__PT_R8(%r9)
- lm %r0,%r7,__PT_R0(%r9)
-0: lm %r8,%r9,__LC_RETURN_PSW
+ clg %r9,BASED(.Lcleanup_sysc_restore_insn)
+ je 0f
+ lg %r9,24(%r11) # get saved pointer to pt_regs
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+0: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_sysc_restore_insn:
- .long .Lsysc_done - 4 + 0x80000000
+ .quad .Lsysc_done - 4
.Lcleanup_io_tif:
- l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000
+ larl %r9,.Lio_tif
br %r14
.Lcleanup_io_restore:
- cl %r9,BASED(.Lcleanup_io_restore_insn)
- jhe 0f
- l %r9,12(%r11) # get saved r11 pointer to pt_regs
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
- mvc 0(32,%r11),__PT_R8(%r9)
- lm %r0,%r7,__PT_R0(%r9)
-0: lm %r8,%r9,__LC_RETURN_PSW
+ clg %r9,BASED(.Lcleanup_io_restore_insn)
+ je 0f
+ lg %r9,24(%r11) # get saved r11 pointer to pt_regs
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+0: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_io_restore_insn:
- .long .Lio_done - 4 + 0x80000000
+ .quad .Lio_done - 4
.Lcleanup_idle:
# copy interrupt clock & cpu timer
mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
- chi %r11,__LC_SAVE_AREA_ASYNC
+ cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
-0: # check if stck has been executed
- cl %r9,BASED(.Lcleanup_idle_insn)
+0: # check if stck & stpt have been executed
+ clg %r9,BASED(.Lcleanup_idle_insn)
jhe 1f
mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
- mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
+ mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1: # account system time going idle
- lm %r9,%r10,__LC_STEAL_TIMER
- ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2)
- SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK
- stm %r9,%r10,__LC_STEAL_TIMER
+ lg %r9,__LC_STEAL_TIMER
+ alg %r9,__CLOCK_IDLE_ENTER(%r2)
+ slg %r9,__LC_LAST_UPDATE_CLOCK
+ stg %r9,__LC_STEAL_TIMER
mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
- lm %r9,%r10,__LC_SYSTEM_TIMER
- ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER
- SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2)
- stm %r9,%r10,__LC_SYSTEM_TIMER
+ lg %r9,__LC_SYSTEM_TIMER
+ alg %r9,__LC_LAST_UPDATE_TIMER
+ slg %r9,__TIMER_IDLE_ENTER(%r2)
+ stg %r9,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
# prepare return psw
- n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits
- l %r9,24(%r11) # return from psw_idle
+ nihh %r8,0xfcfd # clear irq & wait state bits
+ lg %r9,48(%r11) # return from psw_idle
br %r14
.Lcleanup_idle_insn:
- .long .Lpsw_idle_lpsw + 0x80000000
-.Lcleanup_idle_wait:
- .long 0xfcfdffff
+ .quad .Lpsw_idle_lpsw
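# The nihh with 0xfcfd above clears PSW bits 6 and 7 (I/O and external
# interrupt masks) and bit 14 (wait state) in the high halfword of %r8,
# replacing the former 32-bit "n" against the 0xfcdffff-style constant.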
/*
* Integer constants
*/
- .align 4
-.Lnr_syscalls:
- .long NR_syscalls
-.Lvtimer_max:
- .quad 0x7fffffffffffffff
+ .align 8
+.Lcritical_start:
+ .quad .L__critical_start
+.Lcritical_length:
+ .quad .L__critical_end - .L__critical_start
+
+#if IS_ENABLED(CONFIG_KVM)
/*
- * Symbol constants
+ * sie64a calling convention:
+ * %r2 pointer to sie control block
+ * %r3 guest register save area
*/
-.Lc_do_machine_check: .long s390_do_machine_check
-.Lc_handle_mcck: .long s390_handle_mcck
-.Lc_do_IRQ: .long do_IRQ
-.Lc_do_signal: .long do_signal
-.Lc_do_notify_resume: .long do_notify_resume
-.Lc_do_per_trap: .long do_per_trap
-.Lc_jump_table: .long pgm_check_table
-.Lc_schedule: .long schedule
-#ifdef CONFIG_PREEMPT
-.Lc_preempt_irq: .long preempt_schedule_irq
-#endif
-.Lc_trace_enter: .long do_syscall_trace_enter
-.Lc_trace_exit: .long do_syscall_trace_exit
-.Lc_schedule_tail: .long schedule_tail
-.Lc_sysc_per: .long .Lsysc_per + 0x80000000
-#ifdef CONFIG_TRACE_IRQFLAGS
-.Lc_hardirqs_on: .long trace_hardirqs_on_caller
-.Lc_hardirqs_off: .long trace_hardirqs_off_caller
-#endif
-#ifdef CONFIG_LOCKDEP
-.Lc_lockdep_sys_exit: .long lockdep_sys_exit
+ENTRY(sie64a)
+ stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
+ stg %r2,__SF_EMPTY(%r15) # save control block pointer
+ stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
+ xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
+ lmg %r0,%r13,0(%r3) # load guest gprs 0-13
+ lg %r14,__LC_GMAP # get gmap pointer
+ ltgr %r14,%r14
+ jz .Lsie_gmap
+ lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
+.Lsie_gmap:
+ lg %r14,__SF_EMPTY(%r15) # get control block pointer
+ oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
+ tm __SIE_PROG20+3(%r14),1 # last exit...
+ jnz .Lsie_done
+ LPP __SF_EMPTY(%r15) # set guest id
+ sie 0(%r14)
+.Lsie_done:
+ LPP __SF_EMPTY+16(%r15) # set host id
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+# some program checks are suppressing. C code (e.g. do_protection_exception)
+# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
+# instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
+# See also HANDLE_SIE_INTERCEPT
+.Lrewind_pad:
+ nop 0
+ .globl sie_exit
+sie_exit:
+ lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ lg %r2,__SF_EMPTY+24(%r15) # return exit reason code
+ br %r14
+.Lsie_fault:
+ lghi %r14,-EFAULT
+ stg %r14,__SF_EMPTY+24(%r15) # set exit reason code
+ j sie_exit
+
+ .align 8
+.Lsie_critical:
+ .quad .Lsie_gmap
+.Lsie_critical_length:
+ .quad .Lsie_done - .Lsie_gmap
+
+ EX_TABLE(.Lrewind_pad,.Lsie_fault)
+ EX_TABLE(sie_exit,.Lsie_fault)
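# The two EX_TABLE entries route a fault on the landing pad or on sie_exit
# itself to .Lsie_fault, which stores -EFAULT in the exit-reason slot
# (__SF_EMPTY+24) and then takes the regular sie_exit path.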
#endif
-.Lc_critical_start: .long .L__critical_start + 0x80000000
-.Lc_critical_length: .long .L__critical_end - .L__critical_start
- .section .rodata, "a"
-#define SYSCALL(esa,esame,emu) .long esa
+ .section .rodata, "a"
+#define SYSCALL(esame,emu) .long esame
.globl sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
+
+#ifdef CONFIG_COMPAT
+
+#define SYSCALL(esame,emu) .long emu
+ .globl sys_call_table_emu
+sys_call_table_emu:
+#include "syscalls.S"
+#undef SYSCALL
+#endif
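# With 31-bit support removed, SYSCALL() takes only the 64-bit entry point
# and the compat ("emu") entry point; sys_call_table_emu is generated from
# the same syscalls.S list for CONFIG_COMPAT tasks.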
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
deleted file mode 100644
index c329446..0000000
--- a/arch/s390/kernel/entry64.S
+++ /dev/null
@@ -1,1059 +0,0 @@
-/*
- * S390 low-level entry points.
- *
- * Copyright IBM Corp. 1999, 2012
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Hartmut Penner (hp@de.ibm.com),
- * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- * Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/errno.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-#include <asm/page.h>
-#include <asm/sigp.h>
-#include <asm/irq.h>
-
-__PT_R0 = __PT_GPRS
-__PT_R1 = __PT_GPRS + 8
-__PT_R2 = __PT_GPRS + 16
-__PT_R3 = __PT_GPRS + 24
-__PT_R4 = __PT_GPRS + 32
-__PT_R5 = __PT_GPRS + 40
-__PT_R6 = __PT_GPRS + 48
-__PT_R7 = __PT_GPRS + 56
-__PT_R8 = __PT_GPRS + 64
-__PT_R9 = __PT_GPRS + 72
-__PT_R10 = __PT_GPRS + 80
-__PT_R11 = __PT_GPRS + 88
-__PT_R12 = __PT_GPRS + 96
-__PT_R13 = __PT_GPRS + 104
-__PT_R14 = __PT_GPRS + 112
-__PT_R15 = __PT_GPRS + 120
-
-STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
-STACK_SIZE = 1 << STACK_SHIFT
-STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
-
-_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
- _TIF_UPROBE)
-_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
- _TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
-_PIF_WORK = (_PIF_PER_TRAP)
-
-#define BASED(name) name-system_call(%r13)
-
- .macro TRACE_IRQS_ON
-#ifdef CONFIG_TRACE_IRQFLAGS
- basr %r2,%r0
- brasl %r14,trace_hardirqs_on_caller
-#endif
- .endm
-
- .macro TRACE_IRQS_OFF
-#ifdef CONFIG_TRACE_IRQFLAGS
- basr %r2,%r0
- brasl %r14,trace_hardirqs_off_caller
-#endif
- .endm
-
- .macro LOCKDEP_SYS_EXIT
-#ifdef CONFIG_LOCKDEP
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jz .+10
- brasl %r14,lockdep_sys_exit
-#endif
- .endm
-
- .macro LPP newpp
-#if IS_ENABLED(CONFIG_KVM)
- tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
- jz .+8
- .insn s,0xb2800000,\newpp
-#endif
- .endm
-
- .macro HANDLE_SIE_INTERCEPT scratch,reason
-#if IS_ENABLED(CONFIG_KVM)
- tmhh %r8,0x0001 # interrupting from user ?
- jnz .+62
- lgr \scratch,%r9
- slg \scratch,BASED(.Lsie_critical)
- clg \scratch,BASED(.Lsie_critical_length)
- .if \reason==1
- # Some program interrupts are suppressing (e.g. protection).
- # We must also check the instruction after SIE in that case.
- # do_protection_exception will rewind to .Lrewind_pad
- jh .+42
- .else
- jhe .+42
- .endif
- lg %r14,__SF_EMPTY(%r15) # get control block pointer
- LPP __SF_EMPTY+16(%r15) # set host id
- ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- larl %r9,sie_exit # skip forward to sie_exit
- mvi __SF_EMPTY+31(%r15),\reason # set exit reason
-#endif
- .endm
-
- .macro CHECK_STACK stacksize,savearea
-#ifdef CONFIG_CHECK_STACK
- tml %r15,\stacksize - CONFIG_STACK_GUARD
- lghi %r14,\savearea
- jz stack_overflow
-#endif
- .endm
-
- .macro SWITCH_ASYNC savearea,stack,shift
- tmhh %r8,0x0001 # interrupting from user ?
- jnz 1f
- lgr %r14,%r9
- slg %r14,BASED(.Lcritical_start)
- clg %r14,BASED(.Lcritical_length)
- jhe 0f
- lghi %r11,\savearea # inside critical section, do cleanup
- brasl %r14,cleanup_critical
- tmhh %r8,0x0001 # retest problem state after cleanup
- jnz 1f
-0: lg %r14,\stack # are we already on the target stack?
- slgr %r14,%r15
- srag %r14,%r14,\shift
- jnz 1f
- CHECK_STACK 1<<\shift,\savearea
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 2f
-1: lg %r15,\stack # load target stack
-2: la %r11,STACK_FRAME_OVERHEAD(%r15)
- .endm
-
- .macro UPDATE_VTIME scratch,enter_timer
- lg \scratch,__LC_EXIT_TIMER
- slg \scratch,\enter_timer
- alg \scratch,__LC_USER_TIMER
- stg \scratch,__LC_USER_TIMER
- lg \scratch,__LC_LAST_UPDATE_TIMER
- slg \scratch,__LC_EXIT_TIMER
- alg \scratch,__LC_SYSTEM_TIMER
- stg \scratch,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
- .endm
-
- .macro LAST_BREAK scratch
- srag \scratch,%r10,23
- jz .+10
- stg %r10,__TI_last_break(%r12)
- .endm
-
- .macro REENABLE_IRQS
- stg %r8,__LC_RETURN_PSW
- ni __LC_RETURN_PSW,0xbf
- ssm __LC_RETURN_PSW
- .endm
-
- .macro STCK savearea
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
- .insn s,0xb27c0000,\savearea # store clock fast
-#else
- .insn s,0xb2050000,\savearea # store clock
-#endif
- .endm
-
- .section .kprobes.text, "ax"
-
-/*
- * Scheduler resume function, called by switch_to
- * gpr2 = (task_struct *) prev
- * gpr3 = (task_struct *) next
- * Returns:
- * gpr2 = prev
- */
-ENTRY(__switch_to)
- stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
- stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
- lg %r4,__THREAD_info(%r2) # get thread_info of prev
- lg %r5,__THREAD_info(%r3) # get thread_info of next
- lgr %r15,%r5
- aghi %r15,STACK_INIT # end of kernel stack of next
- stg %r3,__LC_CURRENT # store task struct of next
- stg %r5,__LC_THREAD_INFO # store thread info of next
- stg %r15,__LC_KERNEL_STACK # store end of kernel stack
- lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
- mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
- lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
- lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
- br %r14
-
-.L__critical_start:
-/*
- * SVC interrupt handler routine. System calls are synchronous events and
- * are executed with interrupts enabled.
- */
-
-ENTRY(system_call)
- stpt __LC_SYNC_ENTER_TIMER
-.Lsysc_stmg:
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- lghi %r14,_PIF_SYSCALL
-.Lsysc_per:
- lg %r15,__LC_KERNEL_STACK
- la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
-.Lsysc_vtime:
- UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
- LAST_BREAK %r13
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
- mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
- stg %r14,__PT_FLAGS(%r11)
-.Lsysc_do_svc:
- lg %r10,__TI_sysc_table(%r12) # address of system call table
- llgh %r8,__PT_INT_CODE+2(%r11)
- slag %r8,%r8,2 # shift and test for svc 0
- jnz .Lsysc_nr_ok
- # svc 0: system call number in %r1
- llgfr %r1,%r1 # clear high word in r1
- cghi %r1,NR_syscalls
- jnl .Lsysc_nr_ok
- sth %r1,__PT_INT_CODE+2(%r11)
- slag %r8,%r1,2
-.Lsysc_nr_ok:
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- stg %r2,__PT_ORIG_GPR2(%r11)
- stg %r7,STACK_FRAME_OVERHEAD(%r15)
- lgf %r9,0(%r8,%r10) # get system call addr.
- tm __TI_flags+7(%r12),_TIF_TRACE
- jnz .Lsysc_tracesys
- basr %r14,%r9 # call sys_xxxx
- stg %r2,__PT_R2(%r11) # store return value
-
-.Lsysc_return:
- LOCKDEP_SYS_EXIT
-.Lsysc_tif:
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno .Lsysc_restore
- tm __PT_FLAGS+7(%r11),_PIF_WORK
- jnz .Lsysc_work
- tm __TI_flags+7(%r12),_TIF_WORK
- jnz .Lsysc_work # check for work
- tm __LC_CPU_FLAGS+7,_CIF_WORK
- jnz .Lsysc_work
-.Lsysc_restore:
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
- lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_PSW
-.Lsysc_done:
-
-#
-# One of the work bits is on. Find out which one.
-#
-.Lsysc_work:
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jo .Lsysc_mcck_pending
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jo .Lsysc_reschedule
-#ifdef CONFIG_UPROBES
- tm __TI_flags+7(%r12),_TIF_UPROBE
- jo .Lsysc_uprobe_notify
-#endif
- tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
- jo .Lsysc_singlestep
- tm __TI_flags+7(%r12),_TIF_SIGPENDING
- jo .Lsysc_sigpending
- tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
- jo .Lsysc_notify_resume
- tm __LC_CPU_FLAGS+7,_CIF_ASCE
- jo .Lsysc_uaccess
- j .Lsysc_return # beware of critical section cleanup
-
-#
-# _TIF_NEED_RESCHED is set, call schedule
-#
-.Lsysc_reschedule:
- larl %r14,.Lsysc_return
- jg schedule
-
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lsysc_mcck_pending:
- larl %r14,.Lsysc_return
- jg s390_handle_mcck # TIF bit will be cleared by handler
-
-#
-# _CIF_ASCE is set, load user space asce
-#
-.Lsysc_uaccess:
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- j .Lsysc_return
-
-#
-# _TIF_SIGPENDING is set, call do_signal
-#
-.Lsysc_sigpending:
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_signal
- tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
- jno .Lsysc_return
- lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
- lg %r10,__TI_sysc_table(%r12) # address of system call table
- lghi %r8,0 # svc 0 returns -ENOSYS
- llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
- cghi %r1,NR_syscalls
- jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
- slag %r8,%r1,2
- j .Lsysc_nr_ok # restart svc
-
-#
-# _TIF_NOTIFY_RESUME is set, call do_notify_resume
-#
-.Lsysc_notify_resume:
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_notify_resume
-
-#
-# _TIF_UPROBE is set, call uprobe_notify_resume
-#
-#ifdef CONFIG_UPROBES
-.Lsysc_uprobe_notify:
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg uprobe_notify_resume
-#endif
-
-#
-# _PIF_PER_TRAP is set, call do_per_trap
-#
-.Lsysc_singlestep:
- ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_per_trap
-
-#
-# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
-# and after the system call
-#
-.Lsysc_tracesys:
- lgr %r2,%r11 # pass pointer to pt_regs
- la %r3,0
- llgh %r0,__PT_INT_CODE+2(%r11)
- stg %r0,__PT_R2(%r11)
- brasl %r14,do_syscall_trace_enter
- lghi %r0,NR_syscalls
- clgr %r0,%r2
- jnh .Lsysc_tracenogo
- sllg %r8,%r2,2
- lgf %r9,0(%r8,%r10)
-.Lsysc_tracego:
- lmg %r3,%r7,__PT_R3(%r11)
- stg %r7,STACK_FRAME_OVERHEAD(%r15)
- lg %r2,__PT_ORIG_GPR2(%r11)
- basr %r14,%r9 # call sys_xxx
- stg %r2,__PT_R2(%r11) # store return value
-.Lsysc_tracenogo:
- tm __TI_flags+7(%r12),_TIF_TRACE
- jz .Lsysc_return
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_syscall_trace_exit
-
-#
-# a new process exits the kernel with ret_from_fork
-#
-ENTRY(ret_from_fork)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- lg %r12,__LC_THREAD_INFO
- brasl %r14,schedule_tail
- TRACE_IRQS_ON
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
- jne .Lsysc_tracenogo
- # it's a kernel thread
- lmg %r9,%r10,__PT_R9(%r11) # load gprs
-ENTRY(kernel_thread_starter)
- la %r2,0(%r10)
- basr %r14,%r9
- j .Lsysc_tracenogo
-
-/*
- * Program check handler routine
- */
-
-ENTRY(pgm_check_handler)
- stpt __LC_SYNC_ENTER_TIMER
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_PGM_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,1
- tmhh %r8,0x0001 # test problem state bit
- jnz 1f # -> fault in user space
- tmhh %r8,0x4000 # PER bit set in old PSW ?
- jnz 0f # -> enabled, can't be a double fault
- tm __LC_PGM_ILC+3,0x80 # check for per exception
- jnz .Lpgm_svcper # -> single stepped svc
-0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 2f
-1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
- LAST_BREAK %r14
- lg %r15,__LC_KERNEL_STACK
- lg %r14,__TI_task(%r12)
- lghi %r13,__LC_PGM_TDB
- tm __LC_PGM_ILC+2,0x02 # check for transaction abort
- jz 2f
- mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-2: la %r11,STACK_FRAME_OVERHEAD(%r15)
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
- stmg %r8,%r9,__PT_PSW(%r11)
- mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
- mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- stg %r10,__PT_ARGS(%r11)
- tm __LC_PGM_ILC+3,0x80 # check for per exception
- jz 0f
- tmhh %r8,0x0001 # kernel per event ?
- jz .Lpgm_kprobe
- oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
- mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
- mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
- mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-0: REENABLE_IRQS
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- larl %r1,pgm_check_table
- llgh %r10,__PT_INT_CODE+2(%r11)
- nill %r10,0x007f
- sll %r10,2
- je .Lsysc_return
- lgf %r1,0(%r10,%r1) # load address of handler routine
- lgr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # branch to interrupt-handler
- j .Lsysc_return
-
-#
-# PER event in supervisor state, must be kprobes
-#
-.Lpgm_kprobe:
- REENABLE_IRQS
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_per_trap
- j .Lsysc_return
-
-#
-# single stepped system call
-#
-.Lpgm_svcper:
- mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
- larl %r14,.Lsysc_per
- stg %r14,__LC_RETURN_PSW+8
- lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
- lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
-
-/*
- * IO interrupt handler routine
- */
-ENTRY(io_int_handler)
- STCK __LC_INT_CLOCK
- stpt __LC_ASYNC_ENTER_TIMER
- stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_IO_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,2
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- tmhh %r8,0x0001 # interrupting from user?
- jz .Lio_skip
- UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
- LAST_BREAK %r14
-.Lio_skip:
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
- stmg %r8,%r9,__PT_PSW(%r11)
- mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- TRACE_IRQS_OFF
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-.Lio_loop:
- lgr %r2,%r11 # pass pointer to pt_regs
- lghi %r3,IO_INTERRUPT
- tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
- jz .Lio_call
- lghi %r3,THIN_INTERRUPT
-.Lio_call:
- brasl %r14,do_IRQ
- tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
- jz .Lio_return
- tpi 0
- jz .Lio_return
- mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- j .Lio_loop
-.Lio_return:
- LOCKDEP_SYS_EXIT
- TRACE_IRQS_ON
-.Lio_tif:
- tm __TI_flags+7(%r12),_TIF_WORK
- jnz .Lio_work # there is work to do (signals etc.)
- tm __LC_CPU_FLAGS+7,_CIF_WORK
- jnz .Lio_work
-.Lio_restore:
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
- lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_PSW
-.Lio_done:
-
-#
-# There is work to do; find out in which context we have been interrupted:
-# 1) if we return to user space we can do all _TIF_WORK work
-# 2) if we return to kernel code and kvm is enabled, check if we need to
-#    modify the psw to leave SIE
-# 3) if we return to kernel code and preemptive scheduling is enabled, check
-#    the preemption counter and, if it is zero, call preempt_schedule_irq
-# Before any work can be done, a switch to the kernel stack is required.
-#
-.Lio_work:
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jo .Lio_work_user # yes -> do resched & signal
-#ifdef CONFIG_PREEMPT
- # check for preemptive scheduling
- icm %r0,15,__TI_precount(%r12)
- jnz .Lio_restore # preemption is disabled
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jno .Lio_restore
- # switch to kernel stack
- lg %r1,__PT_R15(%r11)
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r1)
- lgr %r15,%r1
- # TRACE_IRQS_ON already done at .Lio_return, call
- # TRACE_IRQS_OFF to keep things symmetrical
- TRACE_IRQS_OFF
- brasl %r14,preempt_schedule_irq
- j .Lio_return
-#else
- j .Lio_restore
-#endif
-
-#
-# Need to do work before returning to userspace, switch to kernel stack
-#
-.Lio_work_user:
- lg %r1,__LC_KERNEL_STACK
- mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r1)
- lgr %r15,%r1
-
-#
-# One of the work bits is on. Find out which one.
-#
-.Lio_work_tif:
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jo .Lio_mcck_pending
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jo .Lio_reschedule
- tm __TI_flags+7(%r12),_TIF_SIGPENDING
- jo .Lio_sigpending
- tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
- jo .Lio_notify_resume
- tm __LC_CPU_FLAGS+7,_CIF_ASCE
- jo .Lio_uaccess
- j .Lio_return # beware of critical section cleanup
-
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lio_mcck_pending:
- # TRACE_IRQS_ON already done at .Lio_return
- brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
- TRACE_IRQS_OFF
- j .Lio_return
-
-#
-# _CIF_ASCE is set, load user space asce
-#
-.Lio_uaccess:
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- j .Lio_return
-
-#
-# _TIF_NEED_RESCHED is set, call schedule
-#
-.Lio_reschedule:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- brasl %r14,schedule # call scheduler
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
- j .Lio_return
-
-#
-# _TIF_SIGPENDING is set, call do_signal
-#
-.Lio_sigpending:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_signal
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
- j .Lio_return
-
-#
-# _TIF_NOTIFY_RESUME is set, call do_notify_resume
-#
-.Lio_notify_resume:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_notify_resume
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
- j .Lio_return
-
-/*
- * External interrupt handler routine
- */
-ENTRY(ext_int_handler)
- STCK __LC_INT_CLOCK
- stpt __LC_ASYNC_ENTER_TIMER
- stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_EXT_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,3
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- tmhh %r8,0x0001 # interrupting from user ?
- jz .Lext_skip
- UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
- LAST_BREAK %r14
-.Lext_skip:
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
- stmg %r8,%r9,__PT_PSW(%r11)
- lghi %r1,__LC_EXT_PARAMS2
- mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
- mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
- mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- TRACE_IRQS_OFF
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- lghi %r3,EXT_INTERRUPT
- brasl %r14,do_IRQ
- j .Lio_return
-
-/*
- * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
- */
-ENTRY(psw_idle)
- stg %r3,__SF_EMPTY(%r15)
- larl %r1,.Lpsw_idle_lpsw+4
- stg %r1,__SF_EMPTY+8(%r15)
- STCK __CLOCK_IDLE_ENTER(%r2)
- stpt __TIMER_IDLE_ENTER(%r2)
-.Lpsw_idle_lpsw:
- lpswe __SF_EMPTY(%r15)
- br %r14
-.Lpsw_idle_end:
-
-.L__critical_end:
-
-/*
- * Machine check handler routines
- */
-ENTRY(mcck_int_handler)
- STCK __LC_MCCK_CLOCK
- la %r1,4095 # revalidate r1
- spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
- lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_MCK_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,4
- tm __LC_MCCK_CODE,0x80 # system damage?
- jo .Lmcck_panic # yes -> rest of mcck code invalid
- lghi %r14,__LC_CPU_TIMER_SAVE_AREA
- mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
- tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
- jo 3f
- la %r14,__LC_SYNC_ENTER_TIMER
- clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
- jl 0f
- la %r14,__LC_ASYNC_ENTER_TIMER
-0: clc 0(8,%r14),__LC_EXIT_TIMER
- jl 1f
- la %r14,__LC_EXIT_TIMER
-1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
- jl 2f
- la %r14,__LC_LAST_UPDATE_TIMER
-2: spt 0(%r14)
- mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
-3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
- jno .Lmcck_panic # no -> skip cleanup critical
- SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
- tm %r8,0x0001 # interrupting from user ?
- jz .Lmcck_skip
- UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
- LAST_BREAK %r14
-.Lmcck_skip:
- lghi %r14,__LC_GPREGS_SAVE_AREA+64
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),0(%r14)
- stmg %r8,%r9,__PT_PSW(%r11)
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,s390_do_machine_check
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno .Lmcck_return
- lg %r1,__LC_KERNEL_STACK # switch to kernel stack
- mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r1)
- lgr %r15,%r1
- ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jno .Lmcck_return
- TRACE_IRQS_OFF
- brasl %r14,s390_handle_mcck
- TRACE_IRQS_ON
-.Lmcck_return:
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
- mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
- tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
- jno 0f
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
-0: lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_MCCK_PSW
-
-.Lmcck_panic:
- lg %r14,__LC_PANIC_STACK
- slgr %r14,%r15
- srag %r14,%r14,PAGE_SHIFT
- jz 0f
- lg %r15,__LC_PANIC_STACK
-0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j .Lmcck_skip
-
-#
-# PSW restart interrupt handler
-#
-ENTRY(restart_int_handler)
- stg %r15,__LC_SAVE_AREA_RESTART
- lg %r15,__LC_RESTART_STACK
- aghi %r15,-__PT_SIZE # create pt_regs on stack
- xc 0(__PT_SIZE,%r15),0(%r15)
- stmg %r0,%r14,__PT_R0(%r15)
- mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
- mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
- aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
- xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
- lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
- lg %r2,__LC_RESTART_DATA
- lg %r3,__LC_RESTART_SOURCE
- ltgr %r3,%r3 # test source cpu address
- jm 1f # negative -> skip source stop
-0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
- brc 10,0b # wait for status stored
-1: basr %r14,%r1 # call function
- stap __SF_EMPTY(%r15) # store cpu address
- llgh %r3,__SF_EMPTY(%r15)
-2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
- brc 2,2b
-3: j 3b
-
- .section .kprobes.text, "ax"
-
-#ifdef CONFIG_CHECK_STACK
-/*
- * The synchronous or the asynchronous stack overflowed. We are dead.
- * No need to properly save the registers, we are going to panic anyway.
- * Set up a pt_regs so that show_trace can provide a good call trace.
- */
-stack_overflow:
- lg %r15,__LC_PANIC_STACK # change to panic stack
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- stmg %r0,%r7,__PT_R0(%r11)
- stmg %r8,%r9,__PT_PSW(%r11)
- mvc __PT_R8(64,%r11),0(%r14)
- stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- jg kernel_stack_overflow
-#endif
-
- .align 8
-.Lcleanup_table:
- .quad system_call
- .quad .Lsysc_do_svc
- .quad .Lsysc_tif
- .quad .Lsysc_restore
- .quad .Lsysc_done
- .quad .Lio_tif
- .quad .Lio_restore
- .quad .Lio_done
- .quad psw_idle
- .quad .Lpsw_idle_end
-
-cleanup_critical:
- clg %r9,BASED(.Lcleanup_table) # system_call
- jl 0f
- clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
- jl .Lcleanup_system_call
- clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
- jl 0f
- clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
- jl .Lcleanup_sysc_tif
- clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
- jl .Lcleanup_sysc_restore
- clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
- jl 0f
- clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
- jl .Lcleanup_io_tif
- clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
- jl .Lcleanup_io_restore
- clg %r9,BASED(.Lcleanup_table+64) # psw_idle
- jl 0f
- clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
- jl .Lcleanup_idle
-0: br %r14
-
-
-.Lcleanup_system_call:
- # check if stpt has been executed
- clg %r9,BASED(.Lcleanup_system_call_insn)
- jh 0f
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: # check if stmg has been executed
- clg %r9,BASED(.Lcleanup_system_call_insn+8)
- jh 0f
- mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
-0: # check if base register setup + TIF bit load has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+16)
- jhe 0f
- # set up saved registers r10 and r12
- stg %r10,16(%r11) # r10 last break
- stg %r12,32(%r11) # r12 thread-info pointer
-0: # check if the user time update has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+24)
- jh 0f
- lg %r15,__LC_EXIT_TIMER
- slg %r15,__LC_SYNC_ENTER_TIMER
- alg %r15,__LC_USER_TIMER
- stg %r15,__LC_USER_TIMER
-0: # check if the system time update has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+32)
- jh 0f
- lg %r15,__LC_LAST_UPDATE_TIMER
- slg %r15,__LC_EXIT_TIMER
- alg %r15,__LC_SYSTEM_TIMER
- stg %r15,__LC_SYSTEM_TIMER
-0: # update accounting time stamp
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- # do LAST_BREAK
- lg %r9,16(%r11)
- srag %r9,%r9,23
- jz 0f
- mvc __TI_last_break(8,%r12),16(%r11)
-0: # set up saved register r11
- lg %r15,__LC_KERNEL_STACK
- la %r9,STACK_FRAME_OVERHEAD(%r15)
- stg %r9,24(%r11) # r11 pt_regs pointer
- # fill pt_regs
- mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
- stmg %r0,%r7,__PT_R0(%r9)
- mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
- xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
- mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
- # setup saved register r15
- stg %r15,56(%r11) # r15 stack pointer
- # set new psw address and exit
- larl %r9,.Lsysc_do_svc
- br %r14
-.Lcleanup_system_call_insn:
- .quad system_call
- .quad .Lsysc_stmg
- .quad .Lsysc_per
- .quad .Lsysc_vtime+18
- .quad .Lsysc_vtime+42
-
-.Lcleanup_sysc_tif:
- larl %r9,.Lsysc_tif
- br %r14
-
-.Lcleanup_sysc_restore:
- clg %r9,BASED(.Lcleanup_sysc_restore_insn)
- je 0f
- lg %r9,24(%r11) # get saved pointer to pt_regs
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
- mvc 0(64,%r11),__PT_R8(%r9)
- lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
-.Lcleanup_sysc_restore_insn:
- .quad .Lsysc_done - 4
-
-.Lcleanup_io_tif:
- larl %r9,.Lio_tif
- br %r14
-
-.Lcleanup_io_restore:
- clg %r9,BASED(.Lcleanup_io_restore_insn)
- je 0f
- lg %r9,24(%r11) # get saved r11 pointer to pt_regs
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
- mvc 0(64,%r11),__PT_R8(%r9)
- lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
-.Lcleanup_io_restore_insn:
- .quad .Lio_done - 4
-
-.Lcleanup_idle:
- # copy interrupt clock & cpu timer
- mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
- mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
- mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
-0: # check if stck & stpt have been executed
- clg %r9,BASED(.Lcleanup_idle_insn)
- jhe 1f
- mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
- mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1: # account system time going idle
- lg %r9,__LC_STEAL_TIMER
- alg %r9,__CLOCK_IDLE_ENTER(%r2)
- slg %r9,__LC_LAST_UPDATE_CLOCK
- stg %r9,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
- lg %r9,__LC_SYSTEM_TIMER
- alg %r9,__LC_LAST_UPDATE_TIMER
- slg %r9,__TIMER_IDLE_ENTER(%r2)
- stg %r9,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
- # prepare return psw
- nihh %r8,0xfcfd # clear irq & wait state bits
- lg %r9,48(%r11) # return from psw_idle
- br %r14
-.Lcleanup_idle_insn:
- .quad .Lpsw_idle_lpsw
-
-/*
- * Integer constants
- */
- .align 8
-.Lcritical_start:
- .quad .L__critical_start
-.Lcritical_length:
- .quad .L__critical_end - .L__critical_start
-
-
-#if IS_ENABLED(CONFIG_KVM)
-/*
- * sie64a calling convention:
- * %r2 pointer to sie control block
- * %r3 guest register save area
- */
-ENTRY(sie64a)
- stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
- stg %r2,__SF_EMPTY(%r15) # save control block pointer
- stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
- xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
- lmg %r0,%r13,0(%r3) # load guest gprs 0-13
- lg %r14,__LC_GMAP # get gmap pointer
- ltgr %r14,%r14
- jz .Lsie_gmap
- lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
-.Lsie_gmap:
- lg %r14,__SF_EMPTY(%r15) # get control block pointer
- oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
- tm __SIE_PROG20+3(%r14),1 # last exit...
- jnz .Lsie_done
- LPP __SF_EMPTY(%r15) # set guest id
- sie 0(%r14)
-.Lsie_done:
- LPP __SF_EMPTY+16(%r15) # set host id
- ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
-# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
-# See also HANDLE_SIE_INTERCEPT
-.Lrewind_pad:
- nop 0
- .globl sie_exit
-sie_exit:
- lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
- stmg %r0,%r13,0(%r14) # save guest gprs 0-13
- lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
- lg %r2,__SF_EMPTY+24(%r15) # return exit reason code
- br %r14
-.Lsie_fault:
- lghi %r14,-EFAULT
- stg %r14,__SF_EMPTY+24(%r15) # set exit reason code
- j sie_exit
-
- .align 8
-.Lsie_critical:
- .quad .Lsie_gmap
-.Lsie_critical_length:
- .quad .Lsie_done - .Lsie_gmap
-
- EX_TABLE(.Lrewind_pad,.Lsie_fault)
- EX_TABLE(sie_exit,.Lsie_fault)
-#endif
-
- .section .rodata, "a"
-#define SYSCALL(esa,esame,emu) .long esame
- .globl sys_call_table
-sys_call_table:
-#include "syscalls.S"
-#undef SYSCALL
-
-#ifdef CONFIG_COMPAT
-
-#define SYSCALL(esa,esame,emu) .long emu
- .globl sys_call_table_emu
-sys_call_table_emu:
-#include "syscalls.S"
-#undef SYSCALL
-#endif
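
The .Lio_work comment block in the deleted entry code above enumerates three cases. A rough C rendering of that decision path follows; this is a hedged sketch only: psw_returns_to_user(), switch_to_kernel_stack() and do_tif_work() are hypothetical stand-ins for the assembly, while preempt_count(), need_resched() and preempt_schedule_irq() are the real kernel concepts the code branches on.

    /* sketch of the .Lio_work decision path, mirroring the comment above */
    static void io_work(struct pt_regs *regs)
    {
        if (psw_returns_to_user(regs)) {   /* tm __PT_PSW+1(%r11),0x01 */
            switch_to_kernel_stack();      /* .Lio_work_user */
            do_tif_work();                 /* mcck, resched, signals, ... */
            return;
        }
    #ifdef CONFIG_PREEMPT
        /* returning to kernel code: preempt only if allowed and needed */
        if (preempt_count() == 0 && need_resched()) {
            switch_to_kernel_stack();
            preempt_schedule_irq();
        }
    #endif
        /* otherwise fall through to .Lio_restore and leave the interrupt */
    }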
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 82c1989..e0eaf11 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -57,6 +57,44 @@
unsigned long ftrace_plt;
+static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
+{
+#ifdef CC_USING_HOTPATCH
+ /* brcl 0,0 */
+ insn->opc = 0xc004;
+ insn->disp = 0;
+#else
+ /* stg r14,8(r15) */
+ insn->opc = 0xe3e0;
+ insn->disp = 0xf0080024;
+#endif
+}
+
+static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ if (insn->opc == BREAKPOINT_INSTRUCTION)
+ return 1;
+#endif
+ return 0;
+}
+
+static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ insn->opc = BREAKPOINT_INSTRUCTION;
+ insn->disp = KPROBE_ON_FTRACE_NOP;
+#endif
+}
+
+static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ insn->opc = BREAKPOINT_INSTRUCTION;
+ insn->disp = KPROBE_ON_FTRACE_CALL;
+#endif
+}
+
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return -EFAULT;
if (addr == MCOUNT_ADDR) {
/* Initial code replacement */
-#ifdef CC_USING_HOTPATCH
- /* We expect to see brcl 0,0 */
- ftrace_generate_nop_insn(&orig);
-#else
- /* We expect to see stg r14,8(r15) */
- orig.opc = 0xe3e0;
- orig.disp = 0xf0080024;
-#endif
+ ftrace_generate_orig_insn(&orig);
ftrace_generate_nop_insn(&new);
- } else if (old.opc == BREAKPOINT_INSTRUCTION) {
+ } else if (is_kprobe_on_ftrace(&old)) {
/*
* If we find a breakpoint instruction, a kprobe has been
* placed at the beginning of the function. We write the
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
* bytes of the original instruction so that the kprobes
* handler can execute a nop, if it reaches this breakpoint.
*/
- new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
- orig.disp = KPROBE_ON_FTRACE_CALL;
- new.disp = KPROBE_ON_FTRACE_NOP;
+ ftrace_generate_kprobe_call_insn(&orig);
+ ftrace_generate_kprobe_nop_insn(&new);
} else {
/* Replace ftrace call with a nop. */
ftrace_generate_call_insn(&orig, rec->ip);
@@ -100,8 +130,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
/* Verify that the to be replaced code matches what we expect. */
if (memcmp(&orig, &old, sizeof(old)))
return -EINVAL;
- if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
- return -EPERM;
+ s390_kernel_write((void *) rec->ip, &new, sizeof(new));
return 0;
}
@@ -111,7 +140,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- if (old.opc == BREAKPOINT_INSTRUCTION) {
+ if (is_kprobe_on_ftrace(&old)) {
/*
* If we find a breakpoint instruction, a kprobe has been
* placed at the beginning of the function. We write the
@@ -119,9 +148,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* bytes of the original instruction so that the kprobes
* handler can execute a brasl if it reaches this breakpoint.
*/
- new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
- orig.disp = KPROBE_ON_FTRACE_NOP;
- new.disp = KPROBE_ON_FTRACE_CALL;
+ ftrace_generate_kprobe_nop_insn(&orig);
+ ftrace_generate_kprobe_call_insn(&new);
} else {
/* Replace nop with an ftrace call. */
ftrace_generate_nop_insn(&orig);
@@ -130,8 +158,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
/* Verify that the to be replaced code matches what we expect. */
if (memcmp(&orig, &old, sizeof(old)))
return -EINVAL;
- if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
- return -EPERM;
+ s390_kernel_write((void *) rec->ip, &new, sizeof(new));
return 0;
}
@@ -202,14 +229,16 @@ int ftrace_enable_ftrace_graph_caller(void)
{
u8 op = 0x04; /* set mask field to zero */
- return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ return 0;
}
int ftrace_disable_ftrace_graph_caller(void)
{
u8 op = 0xf4; /* set mask field to all ones */
- return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ return 0;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
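
The helpers introduced above all fill the same 6-byte slot at a function's ftrace location. A small userspace sketch of the encodings they produce, assuming the s390 layout of struct ftrace_insn used in this file (a 2-byte opcode followed by a 4-byte displacement):

    #include <stdint.h>
    #include <stdio.h>

    /* assumed to mirror the s390 struct ftrace_insn: opcode + displacement */
    struct ftrace_insn {
        uint16_t opc;
        int32_t disp;
    } __attribute__((packed));

    int main(void)
    {
        struct ftrace_insn insn;

        /* brcl 0,0 - the hotpatch-style nop generated above */
        insn.opc = 0xc004;
        insn.disp = 0;
        printf("nop:  %04x %08x\n", insn.opc, (uint32_t)insn.disp);

        /* stg %r14,8(%r15) - the expected original instruction */
        insn.opc = 0xe3e0;
        insn.disp = (int32_t)0xf0080024;
        printf("orig: %04x %08x\n", insn.opc, (uint32_t)insn.disp);
        return 0;
    }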
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 132f4c9..59b7c64 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -27,11 +27,7 @@
#include <asm/thread_info.h>
#include <asm/page.h>
-#ifdef CONFIG_64BIT
#define ARCH_OFFSET 4
-#else
-#define ARCH_OFFSET 0
-#endif
__HEAD
@@ -67,7 +63,6 @@ __HEAD
# subroutine to set architecture mode
#
.Lsetmode:
-#ifdef CONFIG_64BIT
mvi __LC_AR_MODE_ID,1 # set esame flag
slr %r0,%r0 # set cpuid to zero
lhi %r1,2 # mode 2 = esame (dump)
@@ -76,16 +71,12 @@ __HEAD
.fill 16,4,0x0
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
sam31 # switch to 31 bit addressing mode
-#else
- mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
-#endif
br %r14
#
# subroutine to wait for end I/O
#
.Lirqwait:
-#ifdef CONFIG_64BIT
mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw
lpsw .Lwaitpsw
.Lioint:
@@ -93,15 +84,6 @@ __HEAD
.align 8
.Lnewpsw:
.quad 0x0000000080000000,.Lioint
-#else
- mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
- lpsw .Lwaitpsw
-.Lioint:
- br %r14
- .align 8
-.Lnewpsw:
- .long 0x00080000,0x80000000+.Lioint
-#endif
.Lwaitpsw:
.long 0x020a0000,0x80000000+.Lioint
@@ -375,7 +357,6 @@ ENTRY(startup)
ENTRY(startup_kdump)
j .Lep_startup_kdump
.Lep_startup_normal:
-#ifdef CONFIG_64BIT
mvi __LC_AR_MODE_ID,1 # set esame flag
slr %r0,%r0 # set cpuid to zero
lhi %r1,2 # mode 2 = esame (dump)
@@ -384,9 +365,6 @@ ENTRY(startup_kdump)
.fill 16,4,0x0
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
sam31 # switch to 31 bit addressing mode
-#else
- mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
-#endif
basr %r13,0 # get base
.LPG0:
xc 0x200(256),0x200 # partially clear lowcore
@@ -396,7 +374,6 @@ ENTRY(startup_kdump)
spt 6f-.LPG0(%r13)
mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
-#ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
.insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST
tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
@@ -435,7 +412,6 @@ ENTRY(startup_kdump)
# the kernel will crash. Format is number of facility words with bits set,
# followed by the facility words.
-#if defined(CONFIG_64BIT)
#if defined(CONFIG_MARCH_Z13)
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_ZEC12)
@@ -451,35 +427,10 @@ ENTRY(startup_kdump)
#elif defined(CONFIG_MARCH_Z900)
.long 1, 0xc0000000
#endif
-#else
-#if defined(CONFIG_MARCH_ZEC12)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z196)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z10)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z9_109)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z990)
- .long 1, 0x80002000
-#elif defined(CONFIG_MARCH_Z900)
- .long 1, 0x80000000
-#endif
-#endif
4:
-#endif
-
-#ifdef CONFIG_64BIT
/* Continue with 64bit startup code in head64.S */
sam64 # switch to 64 bit mode
jg startup_continue
-#else
- /* Continue with 31bit startup code in head31.S */
- l %r13,5f-.LPG0(%r13)
- b 0(%r13)
- .align 8
-5: .long startup_continue
-#endif
.align 8
6: .long 0x7fffffff,0xffffffff
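
The facility lists kept above follow the format spelled out in the comment: a count of required facility words, then the words themselves. The check the startup code performs against the stfle result amounts to the following hedged sketch (check_facilities() is an illustrative name, not the routine in head.S):

    #include <stdint.h>

    /*
     * Return 0 if every facility bit required by the architecture level
     * set 'als' is present in 'fac_list' (the stfle output); nonzero
     * means the machine is too old and the kernel would crash, so the
     * startup code stops with a message instead.
     */
    static int check_facilities(const uint32_t *als, const uint32_t *fac_list)
    {
        uint32_t nwords = als[0];  /* first word: number of facility words */
        uint32_t i;

        for (i = 0; i < nwords; i++)
            if (als[1 + i] & ~fac_list[i])
                return 1;          /* a required facility bit is missing */
        return 0;
    }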
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
deleted file mode 100644
index 6dbe809..0000000
--- a/arch/s390/kernel/head31.S
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright IBM Corp. 2005, 2010
- *
- * Author(s): Hartmut Penner <hp@de.ibm.com>
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Rob van der Heij <rvdhei@iae.nl>
- * Heiko Carstens <heiko.carstens@de.ibm.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-
-__HEAD
-ENTRY(startup_continue)
- basr %r13,0 # get base
-.LPG1:
-
- l %r1,.Lbase_cc-.LPG1(%r13)
- mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
- lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
- l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
- # move IPL device to lowcore
-#
-# Setup stack
-#
- l %r15,.Linittu-.LPG1(%r13)
- st %r15,__LC_THREAD_INFO # cache thread info in lowcore
- mvc __LC_CURRENT(4),__TI_task(%r15)
- ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
- st %r15,__LC_KERNEL_STACK # set end of kernel stack
- ahi %r15,-96
-#
-# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
-# and create a kernel NSS if the SAVESYS= parm is defined
-#
- l %r14,.Lstartup_init-.LPG1(%r13)
- basr %r14,%r14
- lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space,
- # virtual and never return ...
- .align 8
-.Lentry:.long 0x00080000,0x80000000 + _stext
-.Lctl: .long 0x04b50000 # cr0: various things
- .long 0 # cr1: primary space segment table
- .long .Lduct # cr2: dispatchable unit control table
- .long 0 # cr3: instruction authorization
- .long 0 # cr4: instruction authorization
- .long .Lduct # cr5: primary-aste origin
- .long 0 # cr6: I/O interrupts
- .long 0 # cr7: secondary space segment table
- .long 0 # cr8: access registers translation
- .long 0 # cr9: tracing off
- .long 0 # cr10: tracing off
- .long 0 # cr11: tracing off
- .long 0 # cr12: tracing off
- .long 0 # cr13: home space segment table
- .long 0xc0000000 # cr14: machine check handling off
- .long 0 # cr15: linkage stack operations
-.Lbss_bgn: .long __bss_start
-.Lbss_end: .long _end
-.Lparmaddr: .long PARMAREA
-.Linittu: .long init_thread_union
-.Lstartup_init:
- .long startup_init
- .align 64
-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
- .long 0,0,0,0,0,0,0,0
- .align 128
-.Lduald:.rept 8
- .long 0x80000000,0,0,0 # invalid access-list entries
- .endr
-.Lbase_cc:
- .long sched_clock_base_cc
-
-ENTRY(_ehead)
-
- .org 0x100000 - 0x11000 # head.o ends at 0x11000
-#
-# startup-code, running in absolute addressing mode
-#
-ENTRY(_stext)
- basr %r13,0 # get base
-.LPG3:
-# check control registers
- stctl %c0,%c15,0(%r15)
- oi 2(%r15),0x60 # enable sigp emergency & external call
- oi 0(%r15),0x10 # switch on low address protection
- lctl %c0,%c15,0(%r15)
-
-#
- lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
- l %r14,.Lstart-.LPG3(%r13)
- basr %r14,%r14 # call start_kernel
-#
-# We returned from start_kernel ?!? PANIK
-#
- basr %r13,0
- lpsw .Ldw-.(%r13) # load disabled wait psw
-#
- .align 8
-.Ldw: .long 0x000a0000,0x00000000
-.Lstart:.long start_kernel
-.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
index 085a95e..d05950f 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/kernel/head_kdump.S
@@ -92,17 +92,9 @@ startup_kdump_relocated:
#else
.align 2
.Lep_startup_kdump:
-#ifdef CONFIG_64BIT
larl %r13,startup_kdump_crash
lpswe 0(%r13)
.align 8
startup_kdump_crash:
.quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash
-#else
- basr %r13,0
-0: lpsw startup_kdump_crash-0b(%r13)
-.align 8
-startup_kdump_crash:
- .long 0x000a0000,0x00000000 + startup_kdump_crash
-#endif /* CONFIG_64BIT */
#endif /* CONFIG_CRASH_DUMP */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 5c8651f..52fbef9 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -182,24 +182,21 @@ EXPORT_SYMBOL_GPL(diag308);
/* SYSFS */
-#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
+#define IPL_ATTR_SHOW_FN(_prefix, _name, _format, args...) \
static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
char *page) \
{ \
- return sprintf(page, _format, _value); \
-} \
+ return snprintf(page, PAGE_SIZE, _format, ##args); \
+}
+
+#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
+IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
- __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL);
+ __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL)
#define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \
-static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *page) \
-{ \
- return sprintf(page, _fmt_out, \
- (unsigned long long) _value); \
-} \
+IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
@@ -213,15 +210,10 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name,(S_IRUGO | S_IWUSR), \
sys_##_prefix##_##_name##_show, \
- sys_##_prefix##_##_name##_store);
+ sys_##_prefix##_##_name##_store)
#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
-static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *page) \
-{ \
- return sprintf(page, _fmt_out, _value); \
-} \
+IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
@@ -233,7 +225,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name,(S_IRUGO | S_IWUSR), \
sys_##_prefix##_##_name##_show, \
- sys_##_prefix##_##_name##_store);
+ sys_##_prefix##_##_name##_store)
static void make_attrs_ro(struct attribute **attrs)
{
@@ -415,15 +407,9 @@ static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
IPL_PARMBLOCK_SIZE);
}
-
-static struct bin_attribute ipl_parameter_attr = {
- .attr = {
- .name = "binary_parameter",
- .mode = S_IRUGO,
- },
- .size = PAGE_SIZE,
- .read = &ipl_parameter_read,
-};
+static struct bin_attribute ipl_parameter_attr =
+ __BIN_ATTR(binary_parameter, S_IRUGO, ipl_parameter_read, NULL,
+ PAGE_SIZE);
static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
@@ -434,14 +420,13 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
+static struct bin_attribute ipl_scp_data_attr =
+ __BIN_ATTR(scp_data, S_IRUGO, ipl_scp_data_read, NULL, PAGE_SIZE);
-static struct bin_attribute ipl_scp_data_attr = {
- .attr = {
- .name = "scp_data",
- .mode = S_IRUGO,
- },
- .size = PAGE_SIZE,
- .read = ipl_scp_data_read,
+static struct bin_attribute *ipl_fcp_bin_attrs[] = {
+ &ipl_parameter_attr,
+ &ipl_scp_data_attr,
+ NULL,
};
/* FCP ipl device attributes */
@@ -484,6 +469,7 @@ static struct attribute *ipl_fcp_attrs[] = {
static struct attribute_group ipl_fcp_attr_group = {
.attrs = ipl_fcp_attrs,
+ .bin_attrs = ipl_fcp_bin_attrs,
};
/* CCW ipl device attributes */
@@ -540,28 +526,6 @@ static struct attribute_group ipl_unknown_attr_group = {
static struct kset *ipl_kset;
-static int __init ipl_register_fcp_files(void)
-{
- int rc;
-
- rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
- if (rc)
- goto out;
- rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
- if (rc)
- goto out_ipl_parm;
- rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr);
- if (!rc)
- goto out;
-
- sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
-
-out_ipl_parm:
- sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
-out:
- return rc;
-}
-
static void __ipl_run(void *unused)
{
diag308(DIAG308_IPL, NULL);
@@ -596,7 +560,7 @@ static int __init ipl_init(void)
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
- rc = ipl_register_fcp_files();
+ rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
break;
case IPL_TYPE_NSS:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
@@ -744,15 +708,13 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
return count;
}
+static struct bin_attribute sys_reipl_fcp_scp_data_attr =
+ __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read,
+ reipl_fcp_scpdata_write, PAGE_SIZE);
-static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
- .attr = {
- .name = "scp_data",
- .mode = S_IRUGO | S_IWUSR,
- },
- .size = PAGE_SIZE,
- .read = reipl_fcp_scpdata_read,
- .write = reipl_fcp_scpdata_write,
+static struct bin_attribute *reipl_fcp_bin_attrs[] = {
+ &sys_reipl_fcp_scp_data_attr,
+ NULL,
};
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
@@ -841,6 +803,7 @@ static struct attribute *reipl_fcp_attrs[] = {
static struct attribute_group reipl_fcp_attr_group = {
.attrs = reipl_fcp_attrs,
+ .bin_attrs = reipl_fcp_bin_attrs,
};
/* CCW reipl device attributes */
@@ -1261,15 +1224,6 @@ static int __init reipl_fcp_init(void)
return rc;
}
- rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj,
- &sys_reipl_fcp_scp_data_attr);
- if (rc) {
- sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
- kset_unregister(reipl_fcp_kset);
- free_page((unsigned long) reipl_block_fcp);
- return rc;
- }
-
if (ipl_info.type == IPL_TYPE_FCP) {
memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
/*
@@ -1713,9 +1667,7 @@ static ssize_t on_reboot_store(struct kobject *kobj,
{
return set_trigger(buf, &on_reboot_trigger, len);
}
-
-static struct kobj_attribute on_reboot_attr =
- __ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store);
+static struct kobj_attribute on_reboot_attr = __ATTR_RW(on_reboot);
static void do_machine_restart(char *__unused)
{
@@ -1741,9 +1693,7 @@ static ssize_t on_panic_store(struct kobject *kobj,
{
return set_trigger(buf, &on_panic_trigger, len);
}
-
-static struct kobj_attribute on_panic_attr =
- __ATTR(on_panic, 0644, on_panic_show, on_panic_store);
+static struct kobj_attribute on_panic_attr = __ATTR_RW(on_panic);
static void do_panic(void)
{
@@ -1769,9 +1719,7 @@ static ssize_t on_restart_store(struct kobject *kobj,
{
return set_trigger(buf, &on_restart_trigger, len);
}
-
-static struct kobj_attribute on_restart_attr =
- __ATTR(on_restart, 0644, on_restart_show, on_restart_store);
+static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
static void __do_restart(void *ignore)
{
@@ -1808,10 +1756,7 @@ static ssize_t on_halt_store(struct kobject *kobj,
{
return set_trigger(buf, &on_halt_trigger, len);
}
-
-static struct kobj_attribute on_halt_attr =
- __ATTR(on_halt, 0644, on_halt_show, on_halt_store);
-
+static struct kobj_attribute on_halt_attr = __ATTR_RW(on_halt);
static void do_machine_halt(void)
{
@@ -1837,10 +1782,7 @@ static ssize_t on_poff_store(struct kobject *kobj,
{
return set_trigger(buf, &on_poff_trigger, len);
}
-
-static struct kobj_attribute on_poff_attr =
- __ATTR(on_poff, 0644, on_poff_show, on_poff_store);
-
+static struct kobj_attribute on_poff_attr = __ATTR_RW(on_poff);
static void do_machine_power_off(void)
{
@@ -1850,26 +1792,27 @@ static void do_machine_power_off(void)
}
void (*_machine_power_off)(void) = do_machine_power_off;
+static struct attribute *shutdown_action_attrs[] = {
+ &on_restart_attr.attr,
+ &on_reboot_attr.attr,
+ &on_panic_attr.attr,
+ &on_halt_attr.attr,
+ &on_poff_attr.attr,
+ NULL,
+};
+
+static struct attribute_group shutdown_action_attr_group = {
+ .attrs = shutdown_action_attrs,
+};
+
static void __init shutdown_triggers_init(void)
{
shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
firmware_kobj);
if (!shutdown_actions_kset)
goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_reboot_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_panic_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_halt_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_poff_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_restart_attr.attr))
+ if (sysfs_create_group(&shutdown_actions_kset->kobj,
+ &shutdown_action_attr_group))
goto fail;
return;
fail:
@@ -2062,12 +2005,10 @@ static void do_reset_calls(void)
{
struct reset_call *reset;
-#ifdef CONFIG_64BIT
if (diag308_set_works) {
diag308_reset();
return;
}
-#endif
list_for_each_entry(reset, &rcall, list)
reset->fn();
}
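
The ipl.c conversion above leans on the sysfs attribute-group API: binary attributes listed in .bin_attrs are created, and unwound on failure, by the same sysfs_create_group() call that handles the plain attributes, which is what made ipl_register_fcp_files() redundant. A minimal kernel-style sketch of the pattern with hypothetical names (not a standalone program):

    #include <linux/sysfs.h>

    static ssize_t demo_read(struct file *filp, struct kobject *kobj,
                             struct bin_attribute *attr, char *buf,
                             loff_t off, size_t count)
    {
        return 0;                  /* no data in this sketch */
    }

    /* __BIN_ATTR fills in .attr.name, .attr.mode, .read, .write and .size */
    static struct bin_attribute demo_bin_attr =
        __BIN_ATTR(demo_data, S_IRUGO, demo_read, NULL, PAGE_SIZE);

    static struct bin_attribute *demo_bin_attrs[] = {
        &demo_bin_attr,
        NULL,
    };

    static struct attribute_group demo_attr_group = {
        .bin_attrs = demo_bin_attrs,  /* plain attrs would go in .attrs */
    };

    /* sysfs_create_group(kobj, &demo_attr_group) now creates everything */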
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f238720..02ab9aa 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -56,7 +56,7 @@ static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
* /proc/interrupts.
* In addition this list contains non external / I/O events like NMIs.
*/
-static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
+static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"},
{.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"},
{.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"},
@@ -94,6 +94,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
void __init init_IRQ(void)
{
+ BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS);
init_cio_interrupts();
init_airq_interrupts();
init_ext_interrupts();
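
Dropping the explicit NR_ARCH_IRQS bound above lets the compiler size the array from its initializers, and the new BUILD_BUG_ON turns a forgotten entry into a build failure instead of a silent zero-filled hole. The pattern in isolation, with hypothetical names:

    #include <linux/bug.h>
    #include <linux/kernel.h>

    enum { EVENT_A, EVENT_B, EVENT_C, NR_EVENTS };

    static const char *const event_names[] = {
        "A",
        "B",
        "C",
    };

    static void check_event_table(void)
    {
        /* compile-time failure if the enum and the table fall out of sync */
        BUILD_BUG_ON(ARRAY_SIZE(event_names) != NR_EVENTS);
    }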
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index 830066f..a902996 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -78,7 +78,7 @@ static void __jump_label_transform(struct jump_entry *entry,
if (memcmp((void *)entry->code, &old, sizeof(old)))
jump_label_bug(entry, &old, &new);
}
- probe_kernel_write((void *)entry->code, &new, sizeof(new));
+ s390_kernel_write((void *)entry->code, &new, sizeof(new));
}
static int __sm_arch_jump_label_transform(void *data)
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index f516edc..389db56 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -178,7 +178,7 @@ static int swap_instruction(void *data)
}
skip_ftrace:
kcb->kprobe_status = KPROBE_SWAP_INST;
- probe_kernel_write(p->addr, &new_insn, len);
+ s390_kernel_write(p->addr, &new_insn, len);
kcb->kprobe_status = status;
return 0;
}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 2ca9586..0c1a679 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -38,13 +38,8 @@
#define DEBUGP(fmt , ...)
#endif
-#ifndef CONFIG_64BIT
-#define PLT_ENTRY_SIZE 12
-#else /* CONFIG_64BIT */
#define PLT_ENTRY_SIZE 20
-#endif /* CONFIG_64BIT */
-#ifdef CONFIG_64BIT
void *module_alloc(unsigned long size)
{
if (PAGE_ALIGN(size) > MODULES_LEN)
@@ -53,7 +48,6 @@ void *module_alloc(unsigned long size)
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
-#endif
void module_arch_freeing_init(struct module *mod)
{
@@ -323,17 +317,11 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
unsigned int *ip;
ip = me->module_core + me->arch.plt_offset +
info->plt_offset;
-#ifndef CONFIG_64BIT
- ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
- ip[1] = 0x100607f1;
- ip[2] = val;
-#else /* CONFIG_64BIT */
ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
ip[1] = 0x100a0004;
ip[2] = 0x07f10000;
ip[3] = (unsigned int) (val >> 32);
ip[4] = (unsigned int) val;
-#endif /* CONFIG_64BIT */
info->plt_initialized = 1;
}
if (r_type == R_390_PLTOFF16 ||
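
The 64-bit PLT stub written above is five 32-bit words: a fixed trampoline (basr 1,0; lg 1,10(1); br 1) followed by the 64-bit target address it loads through %r1, 20 bytes in total, matching PLT_ENTRY_SIZE. As a self-contained sketch (write_plt_entry() is an illustrative helper, not a kernel function):

    #include <stdint.h>

    #define PLT_ENTRY_SIZE 20          /* five 32-bit words, as in the diff */

    /* fill one PLT entry that loads 'val' into %r1 and branches to it */
    static void write_plt_entry(uint32_t *ip, uint64_t val)
    {
        ip[0] = 0x0d10e310;            /* basr 1,0 ; first half of lg 1,10(1) */
        ip[1] = 0x100a0004;            /* second half of lg 1,10(1) */
        ip[2] = 0x07f10000;            /* br 1 ; padding */
        ip[3] = (uint32_t)(val >> 32); /* target address, high word */
        ip[4] = (uint32_t)val;         /* target address, low word */
    }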
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 3f51cf4..505c17c 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -117,55 +117,36 @@ static int notrace s390_revalidate_registers(struct mci *mci)
*/
kill_task = 1;
}
-#ifndef CONFIG_64BIT
+ fpt_save_area = &S390_lowcore.floating_pt_save_area;
+ fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
+ if (!mci->fc) {
+ /*
+ * Floating point control register can't be restored.
+ * Task will be terminated.
+ */
+ asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
+ kill_task = 1;
+ } else
+ asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
+
asm volatile(
" ld 0,0(%0)\n"
- " ld 2,8(%0)\n"
- " ld 4,16(%0)\n"
- " ld 6,24(%0)"
- : : "a" (&S390_lowcore.floating_pt_save_area));
-#endif
-
- if (MACHINE_HAS_IEEE) {
-#ifdef CONFIG_64BIT
- fpt_save_area = &S390_lowcore.floating_pt_save_area;
- fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
-#else
- fpt_save_area = (void *) S390_lowcore.extended_save_area_addr;
- fpt_creg_save_area = fpt_save_area + 128;
-#endif
- if (!mci->fc) {
- /*
- * Floating point control register can't be restored.
- * Task will be terminated.
- */
- asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
- kill_task = 1;
-
- } else
- asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
-
- asm volatile(
- " ld 0,0(%0)\n"
- " ld 1,8(%0)\n"
- " ld 2,16(%0)\n"
- " ld 3,24(%0)\n"
- " ld 4,32(%0)\n"
- " ld 5,40(%0)\n"
- " ld 6,48(%0)\n"
- " ld 7,56(%0)\n"
- " ld 8,64(%0)\n"
- " ld 9,72(%0)\n"
- " ld 10,80(%0)\n"
- " ld 11,88(%0)\n"
- " ld 12,96(%0)\n"
- " ld 13,104(%0)\n"
- " ld 14,112(%0)\n"
- " ld 15,120(%0)\n"
- : : "a" (fpt_save_area));
- }
-
-#ifdef CONFIG_64BIT
+ " ld 1,8(%0)\n"
+ " ld 2,16(%0)\n"
+ " ld 3,24(%0)\n"
+ " ld 4,32(%0)\n"
+ " ld 5,40(%0)\n"
+ " ld 6,48(%0)\n"
+ " ld 7,56(%0)\n"
+ " ld 8,64(%0)\n"
+ " ld 9,72(%0)\n"
+ " ld 10,80(%0)\n"
+ " ld 11,88(%0)\n"
+ " ld 12,96(%0)\n"
+ " ld 13,104(%0)\n"
+ " ld 14,112(%0)\n"
+ " ld 15,120(%0)\n"
+ : : "a" (fpt_save_area));
/* Revalidate vector registers */
if (MACHINE_HAS_VX && current->thread.vxrs) {
if (!mci->vr) {
@@ -178,7 +159,6 @@ static int notrace s390_revalidate_registers(struct mci *mci)
restore_vx_regs((__vector128 *)
S390_lowcore.vector_save_area_addr);
}
-#endif
/* Revalidate access registers */
asm volatile(
" lam 0,15,0(%0)"
@@ -198,21 +178,14 @@ static int notrace s390_revalidate_registers(struct mci *mci)
*/
s390_handle_damage("invalid control registers.");
} else {
-#ifdef CONFIG_64BIT
asm volatile(
" lctlg 0,15,0(%0)"
: : "a" (&S390_lowcore.cregs_save_area));
-#else
- asm volatile(
- " lctl 0,15,0(%0)"
- : : "a" (&S390_lowcore.cregs_save_area));
-#endif
}
/*
* We don't even try to revalidate the TOD register, since we simply
* can't write something sensible into that register.
*/
-#ifdef CONFIG_64BIT
/*
* See if we can revalidate the TOD programmable register with its
* old contents (should be zero) otherwise set it to zero.
@@ -228,7 +201,6 @@ static int notrace s390_revalidate_registers(struct mci *mci)
" sckpf"
: : "a" (&S390_lowcore.tod_progreg_save_area)
: "0", "cc");
-#endif
/* Revalidate clock comparator register */
set_clock_comparator(S390_lowcore.clock_comparator);
/* Check if old PSW is valid */
@@ -280,19 +252,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
if (mci->b) {
/* Processing backup -> verify if we can survive this */
u64 z_mcic, o_mcic, t_mcic;
-#ifdef CONFIG_64BIT
z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
1ULL<<16);
-#else
- z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
- 1ULL<<29);
- o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
- 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
- 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
-#endif
t_mcic = *(u64 *)mci;
if (((t_mcic & z_mcic) != 0) ||
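
The condition the hunk context breaks off in is the survivability test on the machine-check interruption code: every bit in z_mcic must be zero and every bit in o_mcic must be one, otherwise the machine check cannot be survived. In isolation:

    #include <stdint.h>

    /* return 1 if the machine check interruption code is survivable */
    static int mcic_survivable(uint64_t t_mcic, uint64_t z_mcic, uint64_t o_mcic)
    {
        if ((t_mcic & z_mcic) != 0)         /* a must-be-zero bit is set */
            return 0;
        if ((t_mcic & o_mcic) != o_mcic)    /* a must-be-one bit is clear */
            return 0;
        return 1;
    }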
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index c3f8d15..e6a1578 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
static struct attribute *cpumsf_pmu_events_attr[] = {
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
- CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
+ NULL,
NULL,
};
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
return -EINVAL;
}
- if (si.ad)
+ if (si.ad) {
sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+ cpumsf_pmu_events_attr[1] =
+ CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
+ }
sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
if (!sfdbg)
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index f6f8886..036aa01 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -6,19 +6,13 @@
#include <linux/linkage.h>
-#ifdef CONFIG_32BIT
-#define PGM_CHECK_64BIT(handler) .long default_trap_handler
-#else
-#define PGM_CHECK_64BIT(handler) .long handler
-#endif
-
#define PGM_CHECK(handler) .long handler
#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
/*
* The program check table contains exactly 128 (0x00-0x7f) entries. Each
- * line defines the 31 and/or 64 bit function to be called corresponding
- * to the program check interruption code.
+ * line defines the function to be called corresponding to the program check
+ * interruption code.
*/
.section .rodata, "a"
ENTRY(pgm_check_table)
@@ -46,10 +40,10 @@ PGM_CHECK_DEFAULT /* 14 */
PGM_CHECK(operand_exception) /* 15 */
PGM_CHECK_DEFAULT /* 16 */
PGM_CHECK_DEFAULT /* 17 */
-PGM_CHECK_64BIT(transaction_exception) /* 18 */
+PGM_CHECK(transaction_exception) /* 18 */
PGM_CHECK_DEFAULT /* 19 */
PGM_CHECK_DEFAULT /* 1a */
-PGM_CHECK_64BIT(vector_exception) /* 1b */
+PGM_CHECK(vector_exception) /* 1b */
PGM_CHECK(space_switch_exception) /* 1c */
PGM_CHECK(hfp_sqrt_exception) /* 1d */
PGM_CHECK_DEFAULT /* 1e */
@@ -78,10 +72,10 @@ PGM_CHECK_DEFAULT /* 34 */
PGM_CHECK_DEFAULT /* 35 */
PGM_CHECK_DEFAULT /* 36 */
PGM_CHECK_DEFAULT /* 37 */
-PGM_CHECK_64BIT(do_dat_exception) /* 38 */
-PGM_CHECK_64BIT(do_dat_exception) /* 39 */
-PGM_CHECK_64BIT(do_dat_exception) /* 3a */
-PGM_CHECK_64BIT(do_dat_exception) /* 3b */
+PGM_CHECK(do_dat_exception) /* 38 */
+PGM_CHECK(do_dat_exception) /* 39 */
+PGM_CHECK(do_dat_exception) /* 3a */
+PGM_CHECK(do_dat_exception) /* 3b */
PGM_CHECK_DEFAULT /* 3c */
PGM_CHECK_DEFAULT /* 3d */
PGM_CHECK_DEFAULT /* 3e */
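
This table is consumed by the pgm_check_handler shown earlier in the patch: the program interruption code is masked to its low seven bits and used as an index into the 128 entries above. A hedged C rendering of that dispatch (dispatch_pgm_check() is illustrative only; the table entries are 32-bit .long values, hence the sll %r10,2 scaling in the assembly):

    struct pt_regs;
    extern void (*pgm_check_table[128])(struct pt_regs *);

    /* what pgm_check_handler does with the table: mask, index, call */
    static void dispatch_pgm_check(unsigned int int_code, struct pt_regs *regs)
    {
        unsigned int idx = int_code & 0x7f;  /* nill %r10,0x007f */

        pgm_check_table[idx](regs);          /* scaled load + basr %r14,%r1 */
    }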
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 13fc097..dc5edc2 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -79,13 +79,11 @@ void release_thread(struct task_struct *dead_task)
{
}
-#ifdef CONFIG_64BIT
void arch_release_task_struct(struct task_struct *tsk)
{
if (tsk->thread.vxrs)
kfree(tsk->thread.vxrs);
}
-#endif
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
@@ -144,19 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
p->thread.ri_signum = 0;
frame->childregs.psw.mask &= ~PSW_MASK_RI;
-#ifndef CONFIG_64BIT
- /*
- * save fprs to current->thread.fp_regs to merge them with
- * the emulated registers and then copy the result to the child.
- */
- save_fp_ctl(&current->thread.fp_regs.fpc);
- save_fp_regs(current->thread.fp_regs.fprs);
- memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
- sizeof(s390_fp_regs));
- /* Set a new TLS ? */
- if (clone_flags & CLONE_SETTLS)
- p->thread.acrs[0] = frame->childregs.gprs[6];
-#else /* CONFIG_64BIT */
/* Save the fpu registers to new thread structure. */
save_fp_ctl(&p->thread.fp_regs.fpc);
save_fp_regs(p->thread.fp_regs.fprs);
@@ -172,15 +157,13 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
p->thread.acrs[1] = (unsigned int)tls;
}
}
-#endif /* CONFIG_64BIT */
return 0;
}
asmlinkage void execve_tail(void)
{
current->thread.fp_regs.fpc = 0;
- if (MACHINE_HAS_IEEE)
- asm volatile("sfpc %0,%0" : : "d" (0));
+ asm volatile("sfpc %0,%0" : : "d" (0));
}
/*
@@ -188,18 +171,8 @@ asmlinkage void execve_tail(void)
*/
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
-#ifndef CONFIG_64BIT
- /*
- * save fprs to current->thread.fp_regs to merge them with
- * the emulated registers and then copy the result to the dump.
- */
- save_fp_ctl(&current->thread.fp_regs.fpc);
- save_fp_regs(current->thread.fp_regs.fprs);
- memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
-#else /* CONFIG_64BIT */
save_fp_ctl(&fpregs->fpc);
save_fp_regs(fpregs->fprs);
-#endif /* CONFIG_64BIT */
return 1;
}
EXPORT_SYMBOL(dump_fpu);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index eabfb45..d363c9c 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -44,7 +44,6 @@ void update_cr_regs(struct task_struct *task)
struct thread_struct *thread = &task->thread;
struct per_regs old, new;
-#ifdef CONFIG_64BIT
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
unsigned long cr, cr_new;
@@ -80,7 +79,6 @@ void update_cr_regs(struct task_struct *task)
__ctl_load(cr_new, 2, 2);
}
}
-#endif
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
@@ -93,10 +91,8 @@ void update_cr_regs(struct task_struct *task)
new.control |= PER_EVENT_BRANCH;
else
new.control |= PER_EVENT_IFETCH;
-#ifdef CONFIG_64BIT
new.control |= PER_CONTROL_SUSPENSION;
new.control |= PER_EVENT_TRANSACTION_END;
-#endif
if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
new.control |= PER_EVENT_IFETCH;
new.start = 0;
@@ -146,11 +142,7 @@ void ptrace_disable(struct task_struct *task)
task->thread.per_flags = 0;
}
-#ifndef CONFIG_64BIT
-# define __ADDR_MASK 3
-#else
-# define __ADDR_MASK 7
-#endif
+#define __ADDR_MASK 7
static inline unsigned long __peek_user_per(struct task_struct *child,
addr_t addr)
@@ -223,7 +215,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_64BIT
/*
* Very special case: old & broken 64 bit gdb reading
* from acrs[15]. Result is a 64 bit value. Read the
@@ -232,8 +223,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
if (addr == (addr_t) &dummy->regs.acrs[15])
tmp = ((unsigned long) child->thread.acrs[15]) << 32;
else
-#endif
- tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
+ tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
/*
@@ -261,12 +251,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
tmp = *(addr_t *)
((addr_t) child->thread.vxrs + 2*offset);
else
-#endif
tmp = *(addr_t *)
((addr_t) &child->thread.fp_regs.fprs + offset);
@@ -293,11 +281,9 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell...
*/
mask = __ADDR_MASK;
-#ifdef CONFIG_64BIT
if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
mask = 3;
-#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
@@ -370,7 +356,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_64BIT
/*
* Very special case: old & broken 64 bit gdb writing
* to acrs[15] with a 64 bit value. Ignore the lower
@@ -380,8 +365,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
if (addr == (addr_t) &dummy->regs.acrs[15])
child->thread.acrs[15] = (unsigned int) (data >> 32);
else
-#endif
- *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
+ *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
/*
@@ -411,12 +395,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
*(addr_t *)((addr_t)
child->thread.vxrs + 2*offset) = data;
else
-#endif
*(addr_t *)((addr_t)
&child->thread.fp_regs.fprs + offset) = data;
@@ -441,11 +423,9 @@ static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell indeed...
*/
mask = __ADDR_MASK;
-#ifdef CONFIG_64BIT
if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
mask = 3;
-#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
@@ -649,12 +629,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
tmp = *(__u32 *)
((addr_t) child->thread.vxrs + 2*offset);
else
-#endif
tmp = *(__u32 *)
((addr_t) &child->thread.fp_regs.fprs + offset);
@@ -776,12 +754,10 @@ static int __poke_user_compat(struct task_struct *child,
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
*(__u32 *)((addr_t)
child->thread.vxrs + 2*offset) = tmp;
else
-#endif
*(__u32 *)((addr_t)
&child->thread.fp_regs.fprs + offset) = tmp;
@@ -979,16 +955,13 @@ static int s390_fpregs_get(struct task_struct *target,
if (target == current) {
save_fp_ctl(&target->thread.fp_regs.fpc);
save_fp_regs(target->thread.fp_regs.fprs);
- }
-#ifdef CONFIG_64BIT
- else if (target->thread.vxrs) {
+ } else if (target->thread.vxrs) {
int i;
for (i = 0; i < __NUM_VXRS_LOW; i++)
target->thread.fp_regs.fprs[i] =
*(freg_t *)(target->thread.vxrs + i);
}
-#endif
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_regs, 0, -1);
}
@@ -1026,23 +999,18 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current) {
restore_fp_ctl(&target->thread.fp_regs.fpc);
restore_fp_regs(target->thread.fp_regs.fprs);
- }
-#ifdef CONFIG_64BIT
- else if (target->thread.vxrs) {
+ } else if (target->thread.vxrs) {
int i;
for (i = 0; i < __NUM_VXRS_LOW; i++)
*(freg_t *)(target->thread.vxrs + i) =
target->thread.fp_regs.fprs[i];
}
-#endif
}
return rc;
}
-#ifdef CONFIG_64BIT
-
static int s390_last_break_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -1182,8 +1150,6 @@ static int s390_vxrs_high_set(struct task_struct *target,
return rc;
}
-#endif
-
static int s390_system_call_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -1229,7 +1195,6 @@ static const struct user_regset s390_regsets[] = {
.get = s390_system_call_get,
.set = s390_system_call_set,
},
-#ifdef CONFIG_64BIT
{
.core_note_type = NT_S390_LAST_BREAK,
.n = 1,
@@ -1262,7 +1227,6 @@ static const struct user_regset s390_regsets[] = {
.get = s390_vxrs_high_get,
.set = s390_vxrs_high_set,
},
-#endif
};
static const struct user_regset_view user_s390_view = {
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index dd8016b..52aab0b 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -1,7 +1,7 @@
/*
- * S390 version
- * Copyright IBM Corp. 2000
- * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
+ * Copyright IBM Corp 2000, 2011
+ * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ * Denis Joseph Barrow,
*/
#include <linux/linkage.h>
@@ -9,43 +9,90 @@
#include <asm/sigp.h>
#
-# store_status: Empty implementation until kdump is supported on 31 bit
+# store_status
+#
+# Prerequisites to run this function:
+# - Prefix register is set to zero
+# - Original prefix register is stored in "dump_prefix_page"
+# - Lowcore protection is off
#
ENTRY(store_status)
- br %r14
+ /* Save register one and load save area base */
+ stg %r1,__LC_SAVE_AREA_RESTART
+ lghi %r1,SAVE_AREA_BASE
+ /* General purpose registers */
+ stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ lg %r2,__LC_SAVE_AREA_RESTART
+ stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
+ /* Control registers */
+ stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Access registers */
+ stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Floating point registers */
+ std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Floating point control register */
+ stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* CPU timer */
+ stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Saved prefix register */
+ larl %r2,dump_prefix_page
+ mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
+ /* Clock comparator - seven bytes */
+ larl %r2,.Lclkcmp
+ stckc 0(%r2)
+ mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
+ /* Program status word */
+ epsw %r2,%r3
+ st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
+ st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
+ larl %r2,store_status
+ stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
+ br %r14
+
+ .section .bss
+ .align 8
+.Lclkcmp: .quad 0x0000000000000000
+ .previous
#
# do_reipl_asm
# Parameter: r2 = schid of reipl device
#
+
ENTRY(do_reipl_asm)
basr %r13,0
-.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
-.Lpg1: # do store status of all registers
+.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
+.Lpg1: brasl %r14,store_status
- stm %r0,%r15,__LC_GPREGS_SAVE_AREA
- stctl %c0,%c15,__LC_CREGS_SAVE_AREA
- stam %a0,%a15,__LC_AREGS_SAVE_AREA
- l %r10,.Ldump_pfx-.Lpg0(%r13)
- mvc __LC_PREFIX_SAVE_AREA(4),0(%r10)
- stckc .Lclkcmp-.Lpg0(%r13)
- mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
- stpt __LC_CPU_TIMER_SAVE_AREA
- st %r13, __LC_PSW_SAVE_AREA+4
- lctl %c6,%c6,.Lall-.Lpg0(%r13)
- lr %r1,%r2
- mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
+ lctlg %c6,%c6,.Lall-.Lpg0(%r13)
+ lgr %r1,%r2
+ mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
stsch .Lschib-.Lpg0(%r13)
oi .Lschib+5-.Lpg0(%r13),0x84
-.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
+.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
msch .Lschib-.Lpg0(%r13)
- lhi %r0,5
+ lghi %r0,5
.Lssch: ssch .Liplorb-.Lpg0(%r13)
jz .L001
brct %r0,.Lssch
bas %r14,.Ldisab-.Lpg0(%r13)
-.L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)
-.Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13)
+.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
+.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
.Lcont: c %r1,__LC_SUBCHANNEL_ID
jnz .Ltpi
clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
@@ -58,20 +105,36 @@ ENTRY(do_reipl_asm)
jz .L003
bas %r14,.Ldisab-.Lpg0(%r13)
.L003: st %r1,__LC_SUBCHANNEL_ID
+ lhi %r1,0 # mode 0 = esa
+ slr %r0,%r0 # set cpuid to zero
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode
lpsw 0
- sigp 0,0,SIGP_RESTART
-.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
- lpsw .Ldispsw-.Lpg0(%r13)
+.Ldisab: sll %r14,1
+ srl %r14,1 # need to kill hi bit to avoid specification exceptions.
+ st %r14,.Ldispsw+12-.Lpg0(%r13)
+ lpswe .Ldispsw-.Lpg0(%r13)
.align 8
-.Lclkcmp: .quad 0x0000000000000000
-.Lall: .long 0xff000000
-.Ldump_pfx: .long dump_prefix_page
- .align 8
-.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
-.Lpcnew: .long 0x00080000,0x80000000+.Lecs
-.Lionew: .long 0x00080000,0x80000000+.Lcont
-.Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi
-.Ldispsw: .long 0x000a0000,0x00000000
+.Lall: .quad 0x00000000ff000000
+ .align 16
+/*
+ * These addresses have to be 31 bit, otherwise
+ * the sigp will throw a specification exception
+ * when switching to ESA mode, as bit 31 would be
+ * set in the ESA psw.
+ * Bit 31 of the addresses has to be 0 for the
+ * 31-bit lpswe instruction, a fact that appears to
+ * have been omitted from the PoP.
+ */
+.Lnewpsw: .quad 0x0000000080000000
+ .quad .Lpg1
+.Lpcnew: .quad 0x0000000080000000
+ .quad .Lecs
+.Lionew: .quad 0x0000000080000000
+ .quad .Lcont
+.Lwaitpsw: .quad 0x0202000080000000
+ .quad .Ltpi
+.Ldispsw: .quad 0x0002000080000000
+ .quad 0x0000000000000000
.Liplccws: .long 0x02000000,0x60000018
.long 0x08000008,0x20000001
.Liplorb: .long 0x0049504c,0x0040ff80
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
deleted file mode 100644
index dc3b127..0000000
--- a/arch/s390/kernel/reipl64.S
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright IBM Corp 2000, 2011
- * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
- * Denis Joseph Barrow,
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/sigp.h>
-
-#
-# store_status
-#
-# Prerequisites to run this function:
-# - Prefix register is set to zero
-# - Original prefix register is stored in "dump_prefix_page"
-# - Lowcore protection is off
-#
-ENTRY(store_status)
- /* Save register one and load save area base */
- stg %r1,__LC_SAVE_AREA_RESTART
- lghi %r1,SAVE_AREA_BASE
- /* General purpose registers */
- stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- lg %r2,__LC_SAVE_AREA_RESTART
- stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
- /* Control registers */
- stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Access registers */
- stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Floating point registers */
- std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Floating point control register */
- stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* CPU timer */
- stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Saved prefix register */
- larl %r2,dump_prefix_page
- mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
- /* Clock comparator - seven bytes */
- larl %r2,.Lclkcmp
- stckc 0(%r2)
- mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
- /* Program status word */
- epsw %r2,%r3
- st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
- st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
- larl %r2,store_status
- stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
- br %r14
-
- .section .bss
- .align 8
-.Lclkcmp: .quad 0x0000000000000000
- .previous
-
-#
-# do_reipl_asm
-# Parameter: r2 = schid of reipl device
-#
-
-ENTRY(do_reipl_asm)
- basr %r13,0
-.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
-.Lpg1: brasl %r14,store_status
-
- lctlg %c6,%c6,.Lall-.Lpg0(%r13)
- lgr %r1,%r2
- mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
- stsch .Lschib-.Lpg0(%r13)
- oi .Lschib+5-.Lpg0(%r13),0x84
-.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
- msch .Lschib-.Lpg0(%r13)
- lghi %r0,5
-.Lssch: ssch .Liplorb-.Lpg0(%r13)
- jz .L001
- brct %r0,.Lssch
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
-.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
-.Lcont: c %r1,__LC_SUBCHANNEL_ID
- jnz .Ltpi
- clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
- jnz .Ltpi
- tsch .Liplirb-.Lpg0(%r13)
- tm .Liplirb+9-.Lpg0(%r13),0xbf
- jz .L002
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
- jz .L003
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L003: st %r1,__LC_SUBCHANNEL_ID
- lhi %r1,0 # mode 0 = esa
- slr %r0,%r0 # set cpuid to zero
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode
- lpsw 0
-.Ldisab: sll %r14,1
- srl %r14,1 # need to kill hi bit to avoid specification exceptions.
- st %r14,.Ldispsw+12-.Lpg0(%r13)
- lpswe .Ldispsw-.Lpg0(%r13)
- .align 8
-.Lall: .quad 0x00000000ff000000
- .align 16
-/*
- * These addresses have to be 31 bit otherwise
- * the sigp will throw a specifcation exception
- * when switching to ESA mode as bit 31 be set
- * in the ESA psw.
- * Bit 31 of the addresses has to be 0 for the
- * 31bit lpswe instruction a fact they appear to have
- * omitted from the pop.
- */
-.Lnewpsw: .quad 0x0000000080000000
- .quad .Lpg1
-.Lpcnew: .quad 0x0000000080000000
- .quad .Lecs
-.Lionew: .quad 0x0000000080000000
- .quad .Lcont
-.Lwaitpsw: .quad 0x0202000080000000
- .quad .Ltpi
-.Ldispsw: .quad 0x0002000080000000
- .quad 0x0000000000000000
-.Liplccws: .long 0x02000000,0x60000018
- .long 0x08000008,0x20000001
-.Liplorb: .long 0x0049504c,0x0040ff80
- .long 0x00000000+.Liplccws
-.Lschib: .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
-.Liplirb: .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index f4e6f20..cfac283 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -19,7 +19,8 @@
* %r7 = PAGE_SIZE
* %r8 holds the source address
* %r9 = PAGE_SIZE
- * %r10 is a page mask
+ *
+ * 0xf000 is the page mask
*/
.text
@@ -27,46 +28,47 @@ ENTRY(relocate_kernel)
basr %r13,0 # base address
.base:
stnsm sys_msk-.base(%r13),0xfb # disable DAT
- stctl %c0,%c15,ctlregs-.base(%r13)
- stm %r0,%r15,gprregs-.base(%r13)
+ stctg %c0,%c15,ctlregs-.base(%r13)
+ stmg %r0,%r15,gprregs-.base(%r13)
+ lghi %r0,3
+ sllg %r0,%r0,31
+ stg %r0,0x1d0(%r0)
+ la %r0,.back_pgm-.base(%r13)
+ stg %r0,0x1d8(%r0)
la %r1,load_psw-.base(%r13)
mvc 0(8,%r0),0(%r1)
la %r0,.back-.base(%r13)
st %r0,4(%r0)
oi 4(%r0),0x80
- mvc 0x68(8,%r0),0(%r1)
- la %r0,.back_pgm-.base(%r13)
- st %r0,0x6c(%r0)
- oi 0x6c(%r0),0x80
- lhi %r0,0
+ lghi %r0,0
diag %r0,%r0,0x308
.back:
+ lhi %r1,1 # mode 1 = esame
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode
+ sam64 # switch to 64 bit addressing mode
basr %r13,0
.back_base:
oi have_diag308-.back_base(%r13),0x01
- lctl %c0,%c15,ctlregs-.back_base(%r13)
- lm %r0,%r15,gprregs-.back_base(%r13)
- j .start_reloc
+ lctlg %c0,%c15,ctlregs-.back_base(%r13)
+ lmg %r0,%r15,gprregs-.back_base(%r13)
+ j .top
.back_pgm:
- lm %r0,%r15,gprregs-.base(%r13)
- .start_reloc:
- lhi %r10,-1 # preparing the mask
- sll %r10,12 # shift it such that it becomes 0xf000
+ lmg %r0,%r15,gprregs-.base(%r13)
.top:
- lhi %r7,4096 # load PAGE_SIZE in r7
- lhi %r9,4096 # load PAGE_SIZE in r9
- l %r5,0(%r2) # read another word for indirection page
- ahi %r2,4 # increment pointer
+ lghi %r7,4096 # load PAGE_SIZE in r7
+ lghi %r9,4096 # load PAGE_SIZE in r9
+ lg %r5,0(%r2) # read another word for indirection page
+ aghi %r2,8 # increment pointer
tml %r5,0x1 # is it a destination page?
je .indir_check # NO, goto "indir_check"
- lr %r6,%r5 # r6 = r5
- nr %r6,%r10 # mask it out and...
+ lgr %r6,%r5 # r6 = r5
+ nill %r6,0xf000 # mask it out and...
j .top # ...next iteration
.indir_check:
tml %r5,0x2 # is it an indirection page?
je .done_test # NO, goto "done_test"
- nr %r5,%r10 # YES, mask out,
- lr %r2,%r5 # move it into the right register,
+ nill %r5,0xf000 # YES, mask out,
+ lgr %r2,%r5 # move it into the right register,
j .top # and read next...
.done_test:
tml %r5,0x4 # is it the done indicator?
@@ -75,13 +77,13 @@ ENTRY(relocate_kernel)
.source_test:
tml %r5,0x8 # it should be a source indicator...
je .top # NO, ignore it...
- lr %r8,%r5 # r8 = r5
- nr %r8,%r10 # masking
+ lgr %r8,%r5 # r8 = r5
+ nill %r8,0xf000 # masking
0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
jo 0b
j .top
.done:
- sr %r0,%r0 # clear register r0
+ sgr %r0,%r0 # clear register r0
la %r4,load_psw-.base(%r13) # load psw-address into the register
o %r3,4(%r4) # or load address into psw
st %r3,4(%r4)
@@ -90,8 +92,9 @@ ENTRY(relocate_kernel)
jno .no_diag308
diag %r0,%r0,0x308
.no_diag308:
- sr %r1,%r1 # clear %r1
- sr %r2,%r2 # clear %r2
+ sam31 # 31 bit mode
+ sr %r1,%r1 # erase register r1
+ sr %r2,%r2 # erase register r2
sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
lpsw 0 # hopefully start new kernel...
@@ -102,11 +105,11 @@ ENTRY(relocate_kernel)
.quad 0
ctlregs:
.rept 16
- .long 0
+ .quad 0
.endr
gprregs:
.rept 16
- .long 0
+ .quad 0
.endr
have_diag308:
.byte 0
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
deleted file mode 100644
index cfac283..0000000
--- a/arch/s390/kernel/relocate_kernel64.S
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright IBM Corp. 2005
- *
- * Author(s): Rolf Adelsberger,
- * Heiko Carstens <heiko.carstens@de.ibm.com>
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/sigp.h>
-
-/*
- * moves the new kernel to its destination...
- * %r2 = pointer to first kimage_entry_t
- * %r3 = start address - where to jump to after the job is done...
- *
- * %r5 will be used as temp. storage
- * %r6 holds the destination address
- * %r7 = PAGE_SIZE
- * %r8 holds the source address
- * %r9 = PAGE_SIZE
- *
- * 0xf000 is a page_mask
- */
-
- .text
-ENTRY(relocate_kernel)
- basr %r13,0 # base address
- .base:
- stnsm sys_msk-.base(%r13),0xfb # disable DAT
- stctg %c0,%c15,ctlregs-.base(%r13)
- stmg %r0,%r15,gprregs-.base(%r13)
- lghi %r0,3
- sllg %r0,%r0,31
- stg %r0,0x1d0(%r0)
- la %r0,.back_pgm-.base(%r13)
- stg %r0,0x1d8(%r0)
- la %r1,load_psw-.base(%r13)
- mvc 0(8,%r0),0(%r1)
- la %r0,.back-.base(%r13)
- st %r0,4(%r0)
- oi 4(%r0),0x80
- lghi %r0,0
- diag %r0,%r0,0x308
- .back:
- lhi %r1,1 # mode 1 = esame
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode
- sam64 # switch to 64 bit addressing mode
- basr %r13,0
- .back_base:
- oi have_diag308-.back_base(%r13),0x01
- lctlg %c0,%c15,ctlregs-.back_base(%r13)
- lmg %r0,%r15,gprregs-.back_base(%r13)
- j .top
- .back_pgm:
- lmg %r0,%r15,gprregs-.base(%r13)
- .top:
- lghi %r7,4096 # load PAGE_SIZE in r7
- lghi %r9,4096 # load PAGE_SIZE in r9
- lg %r5,0(%r2) # read another word for indirection page
- aghi %r2,8 # increment pointer
- tml %r5,0x1 # is it a destination page?
- je .indir_check # NO, goto "indir_check"
- lgr %r6,%r5 # r6 = r5
- nill %r6,0xf000 # mask it out and...
- j .top # ...next iteration
- .indir_check:
- tml %r5,0x2 # is it a indirection page?
- je .done_test # NO, goto "done_test"
- nill %r5,0xf000 # YES, mask out,
- lgr %r2,%r5 # move it into the right register,
- j .top # and read next...
- .done_test:
- tml %r5,0x4 # is it the done indicator?
- je .source_test # NO! Well, then it should be the source indicator...
- j .done # ok, lets finish it here...
- .source_test:
- tml %r5,0x8 # it should be a source indicator...
- je .top # NO, ignore it...
- lgr %r8,%r5 # r8 = r5
- nill %r8,0xf000 # masking
- 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
- jo 0b
- j .top
- .done:
- sgr %r0,%r0 # clear register r0
- la %r4,load_psw-.base(%r13) # load psw-address into the register
- o %r3,4(%r4) # or load address into psw
- st %r3,4(%r4)
- mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
- tm have_diag308-.base(%r13),0x01
- jno .no_diag308
- diag %r0,%r0,0x308
- .no_diag308:
- sam31 # 31 bit mode
- sr %r1,%r1 # erase register r1
- sr %r2,%r2 # erase register r2
- sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
- lpsw 0 # hopefully start new kernel...
-
- .align 8
- load_psw:
- .long 0x00080000,0x80000000
- sys_msk:
- .quad 0
- ctlregs:
- .rept 16
- .quad 0
- .endr
- gprregs:
- .rept 16
- .quad 0
- .endr
- have_diag308:
- .byte 0
- .align 8
- relocate_kernel_end:
- .align 8
- .globl relocate_kernel_len
- relocate_kernel_len:
- .quad relocate_kernel_end - relocate_kernel
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 7e77e03..43c3169 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -36,21 +36,17 @@ _sclp_wait_int:
ahi %r15,-96 # create stack frame
la %r8,LC_EXT_NEW_PSW # register int handler
la %r9,.LextpswS1-.LbaseS1(%r13)
-#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa1
la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit
la %r9,.LextpswS1_64-.LbaseS1(%r13)
.Lesa1:
-#endif
mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
mvc 0(16,%r8),0(%r9)
-#ifdef CONFIG_64BIT
epsw %r6,%r7 # set current addressing mode
nill %r6,0x1 # in new psw (31 or 64 bit mode)
nilh %r7,0x8000
stm %r6,%r7,0(%r8)
-#endif
lhi %r6,0x0200 # cr mask for ext int (cr0.54)
ltr %r2,%r2
jz .LsetctS1
@@ -92,10 +88,8 @@ _sclp_wait_int:
.long 0, 0, 0, 0 # old ext int PSW
.LextpswS1:
.long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
-#ifdef CONFIG_64BIT
.LextpswS1_64:
.quad 0, .LwaitS1 # PSW to handle ext int, 64 bit
-#endif
.LwaitpswS1:
.long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
.LtimeS1:
@@ -272,13 +266,11 @@ _sclp_print:
ENTRY(_sclp_print_early)
stm %r6,%r15,24(%r15) # save registers
ahi %r15,-96 # create stack frame
-#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa2
ahi %r15,-80
stmh %r6,%r15,96(%r15) # store upper register halves
.Lesa2:
-#endif
lr %r10,%r2 # save string pointer
lhi %r2,0
bras %r14,_sclp_setup # enable console
@@ -291,14 +283,12 @@ ENTRY(_sclp_print_early)
lhi %r2,1
bras %r14,_sclp_setup # disable console
.LendS5:
-#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa3
lgfr %r2,%r2 # sign extend return value
lmh %r6,%r15,96(%r15) # restore upper register halves
ahi %r15,80
.Lesa3:
-#endif
lm %r6,%r15,120(%r15) # restore registers
br %r14
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5ea8bc..7262fe4 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -92,10 +92,8 @@ EXPORT_SYMBOL(VMALLOC_END);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
-#ifdef CONFIG_64BIT
unsigned long MODULES_VADDR;
unsigned long MODULES_END;
-#endif
/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
@@ -334,19 +332,10 @@ static void __init setup_lowcore(void)
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
-#ifndef CONFIG_64BIT
- if (MACHINE_HAS_IEEE) {
- lc->extended_save_area_addr = (__u32)
- __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
- /* enable extended save area */
- __ctl_set_bit(14, 29);
- }
-#else
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
-#endif
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
lc->async_enter_timer = S390_lowcore.async_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
@@ -450,7 +439,6 @@ static void __init setup_memory_end(void)
unsigned long vmax, vmalloc_size, tmp;
/* Choose kernel address space layout: 2, 3, or 4 levels. */
-#ifdef CONFIG_64BIT
vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
@@ -462,12 +450,6 @@ static void __init setup_memory_end(void)
MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
-#else
- vmalloc_size = VMALLOC_END ?: 96UL << 20;
- vmax = 1UL << 31; /* 2-level kernel page table */
- /* vmalloc area is at the end of the kernel address space. */
- VMALLOC_END = vmax;
-#endif
VMALLOC_START = vmax - vmalloc_size;
/* Split remaining virtual space between 1:1 mapping & vmemmap array */
@@ -754,7 +736,6 @@ static void __init setup_hwcaps(void)
if (MACHINE_HAS_HPAGE)
elf_hwcap |= HWCAP_S390_HPAGE;
-#if defined(CONFIG_64BIT)
/*
* 64-bit register support for 31-bit processes
* HWCAP_S390_HIGH_GPRS is bit 9.
@@ -772,22 +753,15 @@ static void __init setup_hwcaps(void)
*/
if (test_facility(129))
elf_hwcap |= HWCAP_S390_VXRS;
-#endif
-
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
case 0x9672:
-#if !defined(CONFIG_64BIT)
- default: /* Use "g5" as default for 31 bit kernels. */
-#endif
strcpy(elf_platform, "g5");
break;
case 0x2064:
case 0x2066:
-#if defined(CONFIG_64BIT)
default: /* Use "z900" as default for 64 bit kernels. */
-#endif
strcpy(elf_platform, "z900");
break;
case 0x2084:
@@ -839,19 +813,6 @@ void __init setup_arch(char **cmdline_p)
/*
* print what head.S has found out about the machine
*/
-#ifndef CONFIG_64BIT
- if (MACHINE_IS_VM)
- pr_info("Linux is running as a z/VM "
- "guest operating system in 31-bit mode\n");
- else if (MACHINE_IS_LPAR)
- pr_info("Linux is running natively in 31-bit mode\n");
- if (MACHINE_HAS_IEEE)
- pr_info("The hardware system has IEEE compatible "
- "floating point units\n");
- else
- pr_info("The hardware system has no IEEE compatible "
- "floating point units\n");
-#else /* CONFIG_64BIT */
if (MACHINE_IS_VM)
pr_info("Linux is running as a z/VM "
"guest operating system in 64-bit mode\n");
@@ -859,7 +820,6 @@ void __init setup_arch(char **cmdline_p)
pr_info("Linux is running under KVM in 64-bit mode\n");
else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 64-bit mode\n");
-#endif /* CONFIG_64BIT */
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
@@ -930,35 +890,3 @@ void __init setup_arch(char **cmdline_p)
/* Add system specific data to the random pool */
setup_randomness();
}
-
-#ifdef CONFIG_32BIT
-static int no_removal_warning __initdata;
-
-static int __init parse_no_removal_warning(char *str)
-{
- no_removal_warning = 1;
- return 0;
-}
-__setup("no_removal_warning", parse_no_removal_warning);
-
-static int __init removal_warning(void)
-{
- if (no_removal_warning)
- return 0;
- printk(KERN_ALERT "\n\n");
- printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n");
- printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n");
- printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n");
- printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n");
- printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n");
- printk(KERN_CONT "please let us know. Please write to:\n");
- printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n");
- printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n");
- printk(KERN_CONT "Thank you!\n\n");
- printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n");
- printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n");
- schedule_timeout_uninterruptible(300 * HZ);
- return 0;
-}
-early_initcall(removal_warning);
-#endif
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index b3ae6f7..7fec60c 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -106,7 +106,6 @@ static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
save_fp_ctl(&current->thread.fp_regs.fpc);
-#ifdef CONFIG_64BIT
if (current->thread.vxrs) {
int i;
@@ -115,7 +114,6 @@ static void store_sigregs(void)
current->thread.fp_regs.fprs[i] =
*(freg_t *)(current->thread.vxrs + i);
} else
-#endif
save_fp_regs(current->thread.fp_regs.fprs);
}
@@ -124,7 +122,6 @@ static void load_sigregs(void)
{
restore_access_regs(current->thread.acrs);
/* restore_fp_ctl is done in restore_sigregs */
-#ifdef CONFIG_64BIT
if (current->thread.vxrs) {
int i;
@@ -133,7 +130,6 @@ static void load_sigregs(void)
current->thread.fp_regs.fprs[i];
restore_vx_regs(current->thread.vxrs);
} else
-#endif
restore_fp_regs(current->thread.fp_regs.fprs);
}
@@ -200,7 +196,6 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
static int save_sigregs_ext(struct pt_regs *regs,
_sigregs_ext __user *sregs_ext)
{
-#ifdef CONFIG_64BIT
__u64 vxrs[__NUM_VXRS_LOW];
int i;
@@ -215,14 +210,12 @@ static int save_sigregs_ext(struct pt_regs *regs,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
}
-#endif
return 0;
}
static int restore_sigregs_ext(struct pt_regs *regs,
_sigregs_ext __user *sregs_ext)
{
-#ifdef CONFIG_64BIT
__u64 vxrs[__NUM_VXRS_LOW];
int i;
@@ -237,7 +230,6 @@ static int restore_sigregs_ext(struct pt_regs *regs,
for (i = 0; i < __NUM_VXRS_LOW; i++)
*((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
}
-#endif
return 0;
}
@@ -416,13 +408,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
* included in the signal frame on a 31-bit system.
*/
uc_flags = 0;
-#ifdef CONFIG_64BIT
if (MACHINE_HAS_VX) {
frame_size += sizeof(_sigregs_ext);
if (current->thread.vxrs)
uc_flags |= UC_VXRS;
}
-#endif
frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index db8f1115..efd2c19 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -198,19 +198,11 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
-#ifndef CONFIG_64BIT
- if (MACHINE_HAS_IEEE) {
- lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
- if (!lc->extended_save_area_addr)
- goto out;
- }
-#else
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
if (vdso_alloc_per_cpu(lc))
goto out;
-#endif
lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
@@ -229,16 +221,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
{
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
-#ifndef CONFIG_64BIT
- if (MACHINE_HAS_IEEE) {
- struct _lowcore *lc = pcpu->lowcore;
-
- free_page((unsigned long) lc->extended_save_area_addr);
- lc->extended_save_area_addr = 0;
- }
-#else
vdso_free_per_cpu(pcpu->lowcore);
-#endif
if (pcpu == &pcpu_devices[0])
return;
free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
@@ -492,22 +475,6 @@ void arch_send_call_function_single_ipi(int cpu)
pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}
-#ifndef CONFIG_64BIT
-/*
- * this function sends a 'purge tlb' signal to another CPU.
- */
-static void smp_ptlb_callback(void *info)
-{
- __tlb_flush_local();
-}
-
-void smp_ptlb_all(void)
-{
- on_each_cpu(smp_ptlb_callback, NULL, 1);
-}
-EXPORT_SYMBOL(smp_ptlb_all);
-#endif /* ! CONFIG_64BIT */
-
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
@@ -851,7 +818,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(pcpu, tidle);
pcpu_start_fn(pcpu, smp_start_secondary, NULL);
- while (!cpu_online(cpu))
+ /* Wait until cpu puts itself in the online & active maps */
+ while (!cpu_online(cpu) || !cpu_active(cpu))
cpu_relax();
return 0;
}
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index 1c4c5ac..d3236c9 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
/* Always save lowcore pages (LC protection might be enabled). */
if (pfn <= LC_PAGES)
@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
return 1;
/* Skip memory holes and read-only pages (NSS, DCSS, ...). */
+ if (pfn >= stext_pfn && pfn <= eshared_pfn)
+ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
if (tprot(PFN_PHYS(pfn)))
return 1;
return 0;
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp.S
index 6b09fdf..ca62946 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp.S
@@ -177,6 +177,17 @@ restart_entry:
lhi %r1,1
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
sam64
+#ifdef CONFIG_SMP
+ larl %r1,smp_cpu_mt_shift
+ icm %r1,15,0(%r1)
+ jz smt_done
+ llgfr %r1,%r1
+smt_loop:
+ sigp %r1,%r0,SIGP_SET_MULTI_THREADING
+ brc 8,smt_done /* accepted */
+ brc 2,smt_loop /* busy, try again */
+smt_done:
+#endif
larl %r1,.Lnew_pgm_check_psw
lpswe 0(%r1)
pgm_check_entry:
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 23eb222..f145490 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -76,7 +76,6 @@ SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
return sys_ipc(call, first, second, third, ptr, third);
}
-#ifdef CONFIG_64BIT
SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
{
unsigned int ret;
@@ -90,51 +89,3 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
return ret;
}
-#endif /* CONFIG_64BIT */
-
-/*
- * Wrapper function for sys_fadvise64/fadvise64_64
- */
-#ifndef CONFIG_64BIT
-
-SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low,
- size_t, len, int, advice)
-{
- return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
- len, advice);
-}
-
-struct fadvise64_64_args {
- int fd;
- long long offset;
- long long len;
- int advice;
-};
-
-SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
-{
- struct fadvise64_64_args a;
-
- if ( copy_from_user(&a, args, sizeof(a)) )
- return -EFAULT;
- return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
-}
-
-/*
- * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
- * 64 bit argument "len" is split into the upper and lower 32 bits. The
- * system call wrapper in the user space loads the value to %r6/%r7.
- * The code in entry.S keeps the values in %r2 - %r6 where they are and
- * stores %r7 to 96(%r15). But the standard C linkage requires that
- * the whole 64 bit value for len is stored on the stack and doesn't
- * use %r6 at all. So s390_fallocate has to convert the arguments from
- * %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len
- * to
- * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
- */
-SYSCALL_DEFINE5(s390_fallocate, int, fd, int, mode, loff_t, offset,
- u32, len_high, u32, len_low)
-{
- return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
-}
-#endif
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 939ec47..1acad02 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -1,365 +1,365 @@
/*
* definitions for sys_call_table, each line represents an
- * entry in the table in the form
- * SYSCALL(31 bit syscall, 64 bit syscall, 31 bit emulated syscall)
+ * entry in the table in the form
+ * SYSCALL(64 bit syscall, 31 bit emulated syscall)
*
- * this file is meant to be included from entry.S and entry64.S
+ * this file is meant to be included from entry.S
*/
-#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall)
+#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall)
-NI_SYSCALL /* 0 */
-SYSCALL(sys_exit,sys_exit,compat_sys_exit)
-SYSCALL(sys_fork,sys_fork,sys_fork)
-SYSCALL(sys_read,sys_read,compat_sys_s390_read)
-SYSCALL(sys_write,sys_write,compat_sys_s390_write)
-SYSCALL(sys_open,sys_open,compat_sys_open) /* 5 */
-SYSCALL(sys_close,sys_close,compat_sys_close)
-SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
-SYSCALL(sys_creat,sys_creat,compat_sys_creat)
-SYSCALL(sys_link,sys_link,compat_sys_link)
-SYSCALL(sys_unlink,sys_unlink,compat_sys_unlink) /* 10 */
-SYSCALL(sys_execve,sys_execve,compat_sys_execve)
-SYSCALL(sys_chdir,sys_chdir,compat_sys_chdir)
-SYSCALL(sys_time,sys_ni_syscall,compat_sys_time) /* old time syscall */
-SYSCALL(sys_mknod,sys_mknod,compat_sys_mknod)
-SYSCALL(sys_chmod,sys_chmod,compat_sys_chmod) /* 15 */
-SYSCALL(sys_lchown16,sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/
-NI_SYSCALL /* old break syscall holder */
-NI_SYSCALL /* old stat syscall holder */
-SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek)
-SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */
-SYSCALL(sys_mount,sys_mount,compat_sys_mount)
-SYSCALL(sys_oldumount,sys_oldumount,compat_sys_oldumount)
-SYSCALL(sys_setuid16,sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/
-SYSCALL(sys_getuid16,sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/
-SYSCALL(sys_stime,sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */
-SYSCALL(sys_ptrace,sys_ptrace,compat_sys_ptrace)
-SYSCALL(sys_alarm,sys_alarm,compat_sys_alarm)
-NI_SYSCALL /* old fstat syscall */
-SYSCALL(sys_pause,sys_pause,sys_pause)
-SYSCALL(sys_utime,sys_utime,compat_sys_utime) /* 30 */
-NI_SYSCALL /* old stty syscall */
-NI_SYSCALL /* old gtty syscall */
-SYSCALL(sys_access,sys_access,compat_sys_access)
-SYSCALL(sys_nice,sys_nice,compat_sys_nice)
-NI_SYSCALL /* 35 old ftime syscall */
-SYSCALL(sys_sync,sys_sync,sys_sync)
-SYSCALL(sys_kill,sys_kill,compat_sys_kill)
-SYSCALL(sys_rename,sys_rename,compat_sys_rename)
-SYSCALL(sys_mkdir,sys_mkdir,compat_sys_mkdir)
-SYSCALL(sys_rmdir,sys_rmdir,compat_sys_rmdir) /* 40 */
-SYSCALL(sys_dup,sys_dup,compat_sys_dup)
-SYSCALL(sys_pipe,sys_pipe,compat_sys_pipe)
-SYSCALL(sys_times,sys_times,compat_sys_times)
-NI_SYSCALL /* old prof syscall */
-SYSCALL(sys_brk,sys_brk,compat_sys_brk) /* 45 */
-SYSCALL(sys_setgid16,sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/
-SYSCALL(sys_getgid16,sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/
-SYSCALL(sys_signal,sys_signal,compat_sys_signal)
-SYSCALL(sys_geteuid16,sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */
-SYSCALL(sys_getegid16,sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */
-SYSCALL(sys_acct,sys_acct,compat_sys_acct)
-SYSCALL(sys_umount,sys_umount,compat_sys_umount)
-NI_SYSCALL /* old lock syscall */
-SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl)
-SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl) /* 55 */
-NI_SYSCALL /* intel mpx syscall */
-SYSCALL(sys_setpgid,sys_setpgid,compat_sys_setpgid)
-NI_SYSCALL /* old ulimit syscall */
-NI_SYSCALL /* old uname syscall */
-SYSCALL(sys_umask,sys_umask,compat_sys_umask) /* 60 */
-SYSCALL(sys_chroot,sys_chroot,compat_sys_chroot)
-SYSCALL(sys_ustat,sys_ustat,compat_sys_ustat)
-SYSCALL(sys_dup2,sys_dup2,compat_sys_dup2)
-SYSCALL(sys_getppid,sys_getppid,sys_getppid)
-SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */
-SYSCALL(sys_setsid,sys_setsid,sys_setsid)
-SYSCALL(sys_sigaction,sys_sigaction,compat_sys_sigaction)
-NI_SYSCALL /* old sgetmask syscall*/
-NI_SYSCALL /* old ssetmask syscall*/
-SYSCALL(sys_setreuid16,sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */
-SYSCALL(sys_setregid16,sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */
-SYSCALL(sys_sigsuspend,sys_sigsuspend,compat_sys_sigsuspend)
-SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending)
-SYSCALL(sys_sethostname,sys_sethostname,compat_sys_sethostname)
-SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit) /* 75 */
-SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit)
-SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage)
-SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday)
-SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday)
-SYSCALL(sys_getgroups16,sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */
-SYSCALL(sys_setgroups16,sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */
-NI_SYSCALL /* old select syscall */
-SYSCALL(sys_symlink,sys_symlink,compat_sys_symlink)
-NI_SYSCALL /* old lstat syscall */
-SYSCALL(sys_readlink,sys_readlink,compat_sys_readlink) /* 85 */
-SYSCALL(sys_uselib,sys_uselib,compat_sys_uselib)
-SYSCALL(sys_swapon,sys_swapon,compat_sys_swapon)
-SYSCALL(sys_reboot,sys_reboot,compat_sys_reboot)
-SYSCALL(sys_ni_syscall,sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */
-SYSCALL(sys_old_mmap,sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */
-SYSCALL(sys_munmap,sys_munmap,compat_sys_munmap)
-SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate)
-SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate)
-SYSCALL(sys_fchmod,sys_fchmod,compat_sys_fchmod)
-SYSCALL(sys_fchown16,sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/
-SYSCALL(sys_getpriority,sys_getpriority,compat_sys_getpriority)
-SYSCALL(sys_setpriority,sys_setpriority,compat_sys_setpriority)
-NI_SYSCALL /* old profil syscall */
-SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs)
-SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs) /* 100 */
-NI_SYSCALL /* ioperm for i386 */
-SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall)
-SYSCALL(sys_syslog,sys_syslog,compat_sys_syslog)
-SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer)
-SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer) /* 105 */
-SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat)
-SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat)
-SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat)
-NI_SYSCALL /* old uname syscall */
-SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */
-SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
-NI_SYSCALL /* old "idle" system call */
-NI_SYSCALL /* vm86old for i386 */
-SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4)
-SYSCALL(sys_swapoff,sys_swapoff,compat_sys_swapoff) /* 115 */
-SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo)
-SYSCALL(sys_s390_ipc,sys_s390_ipc,compat_sys_s390_ipc)
-SYSCALL(sys_fsync,sys_fsync,compat_sys_fsync)
-SYSCALL(sys_sigreturn,sys_sigreturn,compat_sys_sigreturn)
-SYSCALL(sys_clone,sys_clone,compat_sys_clone) /* 120 */
-SYSCALL(sys_setdomainname,sys_setdomainname,compat_sys_setdomainname)
-SYSCALL(sys_newuname,sys_newuname,compat_sys_newuname)
-NI_SYSCALL /* modify_ldt for i386 */
-SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex)
-SYSCALL(sys_mprotect,sys_mprotect,compat_sys_mprotect) /* 125 */
-SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask)
-NI_SYSCALL /* old "create module" */
-SYSCALL(sys_init_module,sys_init_module,compat_sys_init_module)
-SYSCALL(sys_delete_module,sys_delete_module,compat_sys_delete_module)
-NI_SYSCALL /* 130: old get_kernel_syms */
-SYSCALL(sys_quotactl,sys_quotactl,compat_sys_quotactl)
-SYSCALL(sys_getpgid,sys_getpgid,compat_sys_getpgid)
-SYSCALL(sys_fchdir,sys_fchdir,compat_sys_fchdir)
-SYSCALL(sys_bdflush,sys_bdflush,compat_sys_bdflush)
-SYSCALL(sys_sysfs,sys_sysfs,compat_sys_sysfs) /* 135 */
-SYSCALL(sys_personality,sys_s390_personality,compat_sys_s390_personality)
-NI_SYSCALL /* for afs_syscall */
-SYSCALL(sys_setfsuid16,sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */
-SYSCALL(sys_setfsgid16,sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */
-SYSCALL(sys_llseek,sys_llseek,compat_sys_llseek) /* 140 */
-SYSCALL(sys_getdents,sys_getdents,compat_sys_getdents)
-SYSCALL(sys_select,sys_select,compat_sys_select)
-SYSCALL(sys_flock,sys_flock,compat_sys_flock)
-SYSCALL(sys_msync,sys_msync,compat_sys_msync)
-SYSCALL(sys_readv,sys_readv,compat_sys_readv) /* 145 */
-SYSCALL(sys_writev,sys_writev,compat_sys_writev)
-SYSCALL(sys_getsid,sys_getsid,compat_sys_getsid)
-SYSCALL(sys_fdatasync,sys_fdatasync,compat_sys_fdatasync)
-SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl)
-SYSCALL(sys_mlock,sys_mlock,compat_sys_mlock) /* 150 */
-SYSCALL(sys_munlock,sys_munlock,compat_sys_munlock)
-SYSCALL(sys_mlockall,sys_mlockall,compat_sys_mlockall)
-SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall)
-SYSCALL(sys_sched_setparam,sys_sched_setparam,compat_sys_sched_setparam)
-SYSCALL(sys_sched_getparam,sys_sched_getparam,compat_sys_sched_getparam) /* 155 */
-SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,compat_sys_sched_setscheduler)
-SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,compat_sys_sched_getscheduler)
-SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield)
-SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,compat_sys_sched_get_priority_max)
-SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */
-SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
-SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep)
-SYSCALL(sys_mremap,sys_mremap,compat_sys_mremap)
-SYSCALL(sys_setresuid16,sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */
-SYSCALL(sys_getresuid16,sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */
-NI_SYSCALL /* for vm86 */
-NI_SYSCALL /* old sys_query_module */
-SYSCALL(sys_poll,sys_poll,compat_sys_poll)
-NI_SYSCALL /* old nfsservctl */
-SYSCALL(sys_setresgid16,sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */
-SYSCALL(sys_getresgid16,sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */
-SYSCALL(sys_prctl,sys_prctl,compat_sys_prctl)
-SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,compat_sys_rt_sigreturn)
-SYSCALL(sys_rt_sigaction,sys_rt_sigaction,compat_sys_rt_sigaction)
-SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */
-SYSCALL(sys_rt_sigpending,sys_rt_sigpending,compat_sys_rt_sigpending)
-SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait)
-SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo)
-SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend)
-SYSCALL(sys_pread64,sys_pread64,compat_sys_s390_pread64) /* 180 */
-SYSCALL(sys_pwrite64,sys_pwrite64,compat_sys_s390_pwrite64)
-SYSCALL(sys_chown16,sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */
-SYSCALL(sys_getcwd,sys_getcwd,compat_sys_getcwd)
-SYSCALL(sys_capget,sys_capget,compat_sys_capget)
-SYSCALL(sys_capset,sys_capset,compat_sys_capset) /* 185 */
-SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack)
-SYSCALL(sys_sendfile,sys_sendfile64,compat_sys_sendfile)
-NI_SYSCALL /* streams1 */
-NI_SYSCALL /* streams2 */
-SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */
-SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit)
-SYSCALL(sys_mmap2,sys_mmap2,compat_sys_s390_mmap2)
-SYSCALL(sys_truncate64,sys_ni_syscall,compat_sys_s390_truncate64)
-SYSCALL(sys_ftruncate64,sys_ni_syscall,compat_sys_s390_ftruncate64)
-SYSCALL(sys_stat64,sys_ni_syscall,compat_sys_s390_stat64) /* 195 */
-SYSCALL(sys_lstat64,sys_ni_syscall,compat_sys_s390_lstat64)
-SYSCALL(sys_fstat64,sys_ni_syscall,compat_sys_s390_fstat64)
-SYSCALL(sys_lchown,sys_lchown,compat_sys_lchown)
-SYSCALL(sys_getuid,sys_getuid,sys_getuid)
-SYSCALL(sys_getgid,sys_getgid,sys_getgid) /* 200 */
-SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid)
-SYSCALL(sys_getegid,sys_getegid,sys_getegid)
-SYSCALL(sys_setreuid,sys_setreuid,compat_sys_setreuid)
-SYSCALL(sys_setregid,sys_setregid,compat_sys_setregid)
-SYSCALL(sys_getgroups,sys_getgroups,compat_sys_getgroups) /* 205 */
-SYSCALL(sys_setgroups,sys_setgroups,compat_sys_setgroups)
-SYSCALL(sys_fchown,sys_fchown,compat_sys_fchown)
-SYSCALL(sys_setresuid,sys_setresuid,compat_sys_setresuid)
-SYSCALL(sys_getresuid,sys_getresuid,compat_sys_getresuid)
-SYSCALL(sys_setresgid,sys_setresgid,compat_sys_setresgid) /* 210 */
-SYSCALL(sys_getresgid,sys_getresgid,compat_sys_getresgid)
-SYSCALL(sys_chown,sys_chown,compat_sys_chown)
-SYSCALL(sys_setuid,sys_setuid,compat_sys_setuid)
-SYSCALL(sys_setgid,sys_setgid,compat_sys_setgid)
-SYSCALL(sys_setfsuid,sys_setfsuid,compat_sys_setfsuid) /* 215 */
-SYSCALL(sys_setfsgid,sys_setfsgid,compat_sys_setfsgid)
-SYSCALL(sys_pivot_root,sys_pivot_root,compat_sys_pivot_root)
-SYSCALL(sys_mincore,sys_mincore,compat_sys_mincore)
-SYSCALL(sys_madvise,sys_madvise,compat_sys_madvise)
-SYSCALL(sys_getdents64,sys_getdents64,compat_sys_getdents64) /* 220 */
-SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64)
-SYSCALL(sys_readahead,sys_readahead,compat_sys_s390_readahead)
-SYSCALL(sys_sendfile64,sys_ni_syscall,compat_sys_sendfile64)
-SYSCALL(sys_setxattr,sys_setxattr,compat_sys_setxattr)
-SYSCALL(sys_lsetxattr,sys_lsetxattr,compat_sys_lsetxattr) /* 225 */
-SYSCALL(sys_fsetxattr,sys_fsetxattr,compat_sys_fsetxattr)
-SYSCALL(sys_getxattr,sys_getxattr,compat_sys_getxattr)
-SYSCALL(sys_lgetxattr,sys_lgetxattr,compat_sys_lgetxattr)
-SYSCALL(sys_fgetxattr,sys_fgetxattr,compat_sys_fgetxattr)
-SYSCALL(sys_listxattr,sys_listxattr,compat_sys_listxattr) /* 230 */
-SYSCALL(sys_llistxattr,sys_llistxattr,compat_sys_llistxattr)
-SYSCALL(sys_flistxattr,sys_flistxattr,compat_sys_flistxattr)
-SYSCALL(sys_removexattr,sys_removexattr,compat_sys_removexattr)
-SYSCALL(sys_lremovexattr,sys_lremovexattr,compat_sys_lremovexattr)
-SYSCALL(sys_fremovexattr,sys_fremovexattr,compat_sys_fremovexattr) /* 235 */
-SYSCALL(sys_gettid,sys_gettid,sys_gettid)
-SYSCALL(sys_tkill,sys_tkill,compat_sys_tkill)
-SYSCALL(sys_futex,sys_futex,compat_sys_futex)
-SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,compat_sys_sched_setaffinity)
-SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */
-SYSCALL(sys_tgkill,sys_tgkill,compat_sys_tgkill)
-NI_SYSCALL /* reserved for TUX */
-SYSCALL(sys_io_setup,sys_io_setup,compat_sys_io_setup)
-SYSCALL(sys_io_destroy,sys_io_destroy,compat_sys_io_destroy)
-SYSCALL(sys_io_getevents,sys_io_getevents,compat_sys_io_getevents) /* 245 */
-SYSCALL(sys_io_submit,sys_io_submit,compat_sys_io_submit)
-SYSCALL(sys_io_cancel,sys_io_cancel,compat_sys_io_cancel)
-SYSCALL(sys_exit_group,sys_exit_group,compat_sys_exit_group)
-SYSCALL(sys_epoll_create,sys_epoll_create,compat_sys_epoll_create)
-SYSCALL(sys_epoll_ctl,sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */
-SYSCALL(sys_epoll_wait,sys_epoll_wait,compat_sys_epoll_wait)
-SYSCALL(sys_set_tid_address,sys_set_tid_address,compat_sys_set_tid_address)
-SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,compat_sys_s390_fadvise64)
-SYSCALL(sys_timer_create,sys_timer_create,compat_sys_timer_create)
-SYSCALL(sys_timer_settime,sys_timer_settime,compat_sys_timer_settime) /* 255 */
-SYSCALL(sys_timer_gettime,sys_timer_gettime,compat_sys_timer_gettime)
-SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,compat_sys_timer_getoverrun)
-SYSCALL(sys_timer_delete,sys_timer_delete,compat_sys_timer_delete)
-SYSCALL(sys_clock_settime,sys_clock_settime,compat_sys_clock_settime)
-SYSCALL(sys_clock_gettime,sys_clock_gettime,compat_sys_clock_gettime) /* 260 */
-SYSCALL(sys_clock_getres,sys_clock_getres,compat_sys_clock_getres)
-SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,compat_sys_clock_nanosleep)
-NI_SYSCALL /* reserved for vserver */
-SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,compat_sys_s390_fadvise64_64)
-SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64)
-SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64)
-SYSCALL(sys_remap_file_pages,sys_remap_file_pages,compat_sys_remap_file_pages)
-NI_SYSCALL /* 268 sys_mbind */
-NI_SYSCALL /* 269 sys_get_mempolicy */
-NI_SYSCALL /* 270 sys_set_mempolicy */
-SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open)
-SYSCALL(sys_mq_unlink,sys_mq_unlink,compat_sys_mq_unlink)
-SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend)
-SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive)
-SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify) /* 275 */
-SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr)
-SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load)
-SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key)
-SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key)
-SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */
-SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid)
-SYSCALL(sys_ioprio_set,sys_ioprio_set,compat_sys_ioprio_set)
-SYSCALL(sys_ioprio_get,sys_ioprio_get,compat_sys_ioprio_get)
-SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init)
-SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */
-SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
-NI_SYSCALL /* 287 sys_migrate_pages */
-SYSCALL(sys_openat,sys_openat,compat_sys_openat)
-SYSCALL(sys_mkdirat,sys_mkdirat,compat_sys_mkdirat)
-SYSCALL(sys_mknodat,sys_mknodat,compat_sys_mknodat) /* 290 */
-SYSCALL(sys_fchownat,sys_fchownat,compat_sys_fchownat)
-SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat)
-SYSCALL(sys_fstatat64,sys_newfstatat,compat_sys_s390_fstatat64)
-SYSCALL(sys_unlinkat,sys_unlinkat,compat_sys_unlinkat)
-SYSCALL(sys_renameat,sys_renameat,compat_sys_renameat) /* 295 */
-SYSCALL(sys_linkat,sys_linkat,compat_sys_linkat)
-SYSCALL(sys_symlinkat,sys_symlinkat,compat_sys_symlinkat)
-SYSCALL(sys_readlinkat,sys_readlinkat,compat_sys_readlinkat)
-SYSCALL(sys_fchmodat,sys_fchmodat,compat_sys_fchmodat)
-SYSCALL(sys_faccessat,sys_faccessat,compat_sys_faccessat) /* 300 */
-SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6)
-SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll)
-SYSCALL(sys_unshare,sys_unshare,compat_sys_unshare)
-SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list)
-SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list)
-SYSCALL(sys_splice,sys_splice,compat_sys_splice)
-SYSCALL(sys_sync_file_range,sys_sync_file_range,compat_sys_s390_sync_file_range)
-SYSCALL(sys_tee,sys_tee,compat_sys_tee)
-SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice)
-NI_SYSCALL /* 310 sys_move_pages */
-SYSCALL(sys_getcpu,sys_getcpu,compat_sys_getcpu)
-SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait)
-SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes)
-SYSCALL(sys_s390_fallocate,sys_fallocate,compat_sys_s390_fallocate)
-SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat) /* 315 */
-SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd)
+NI_SYSCALL /* 0 */
+SYSCALL(sys_exit,compat_sys_exit)
+SYSCALL(sys_fork,sys_fork)
+SYSCALL(sys_read,compat_sys_s390_read)
+SYSCALL(sys_write,compat_sys_s390_write)
+SYSCALL(sys_open,compat_sys_open) /* 5 */
+SYSCALL(sys_close,compat_sys_close)
+SYSCALL(sys_restart_syscall,sys_restart_syscall)
+SYSCALL(sys_creat,compat_sys_creat)
+SYSCALL(sys_link,compat_sys_link)
+SYSCALL(sys_unlink,compat_sys_unlink) /* 10 */
+SYSCALL(sys_execve,compat_sys_execve)
+SYSCALL(sys_chdir,compat_sys_chdir)
+SYSCALL(sys_ni_syscall,compat_sys_time) /* old time syscall */
+SYSCALL(sys_mknod,compat_sys_mknod)
+SYSCALL(sys_chmod,compat_sys_chmod) /* 15 */
+SYSCALL(sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/
+NI_SYSCALL /* old break syscall holder */
+NI_SYSCALL /* old stat syscall holder */
+SYSCALL(sys_lseek,compat_sys_lseek)
+SYSCALL(sys_getpid,sys_getpid) /* 20 */
+SYSCALL(sys_mount,compat_sys_mount)
+SYSCALL(sys_oldumount,compat_sys_oldumount)
+SYSCALL(sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */
+SYSCALL(sys_ptrace,compat_sys_ptrace)
+SYSCALL(sys_alarm,compat_sys_alarm)
+NI_SYSCALL /* old fstat syscall */
+SYSCALL(sys_pause,sys_pause)
+SYSCALL(sys_utime,compat_sys_utime) /* 30 */
+NI_SYSCALL /* old stty syscall */
+NI_SYSCALL /* old gtty syscall */
+SYSCALL(sys_access,compat_sys_access)
+SYSCALL(sys_nice,compat_sys_nice)
+NI_SYSCALL /* 35 old ftime syscall */
+SYSCALL(sys_sync,sys_sync)
+SYSCALL(sys_kill,compat_sys_kill)
+SYSCALL(sys_rename,compat_sys_rename)
+SYSCALL(sys_mkdir,compat_sys_mkdir)
+SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */
+SYSCALL(sys_dup,compat_sys_dup)
+SYSCALL(sys_pipe,compat_sys_pipe)
+SYSCALL(sys_times,compat_sys_times)
+NI_SYSCALL /* old prof syscall */
+SYSCALL(sys_brk,compat_sys_brk) /* 45 */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/
+SYSCALL(sys_signal,compat_sys_signal)
+SYSCALL(sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */
+SYSCALL(sys_acct,compat_sys_acct)
+SYSCALL(sys_umount,compat_sys_umount)
+NI_SYSCALL /* old lock syscall */
+SYSCALL(sys_ioctl,compat_sys_ioctl)
+SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */
+NI_SYSCALL /* intel mpx syscall */
+SYSCALL(sys_setpgid,compat_sys_setpgid)
+NI_SYSCALL /* old ulimit syscall */
+NI_SYSCALL /* old uname syscall */
+SYSCALL(sys_umask,compat_sys_umask) /* 60 */
+SYSCALL(sys_chroot,compat_sys_chroot)
+SYSCALL(sys_ustat,compat_sys_ustat)
+SYSCALL(sys_dup2,compat_sys_dup2)
+SYSCALL(sys_getppid,sys_getppid)
+SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */
+SYSCALL(sys_setsid,sys_setsid)
+SYSCALL(sys_sigaction,compat_sys_sigaction)
+NI_SYSCALL /* old sgetmask syscall*/
+NI_SYSCALL /* old ssetmask syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */
+SYSCALL(sys_sigsuspend,compat_sys_sigsuspend)
+SYSCALL(sys_sigpending,compat_sys_sigpending)
+SYSCALL(sys_sethostname,compat_sys_sethostname)
+SYSCALL(sys_setrlimit,compat_sys_setrlimit) /* 75 */
+SYSCALL(sys_getrlimit,compat_sys_old_getrlimit)
+SYSCALL(sys_getrusage,compat_sys_getrusage)
+SYSCALL(sys_gettimeofday,compat_sys_gettimeofday)
+SYSCALL(sys_settimeofday,compat_sys_settimeofday)
+SYSCALL(sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */
+NI_SYSCALL /* old select syscall */
+SYSCALL(sys_symlink,compat_sys_symlink)
+NI_SYSCALL /* old lstat syscall */
+SYSCALL(sys_readlink,compat_sys_readlink) /* 85 */
+SYSCALL(sys_uselib,compat_sys_uselib)
+SYSCALL(sys_swapon,compat_sys_swapon)
+SYSCALL(sys_reboot,compat_sys_reboot)
+SYSCALL(sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */
+SYSCALL(sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */
+SYSCALL(sys_munmap,compat_sys_munmap)
+SYSCALL(sys_truncate,compat_sys_truncate)
+SYSCALL(sys_ftruncate,compat_sys_ftruncate)
+SYSCALL(sys_fchmod,compat_sys_fchmod)
+SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/
+SYSCALL(sys_getpriority,compat_sys_getpriority)
+SYSCALL(sys_setpriority,compat_sys_setpriority)
+NI_SYSCALL /* old profil syscall */
+SYSCALL(sys_statfs,compat_sys_statfs)
+SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */
+NI_SYSCALL /* ioperm for i386 */
+SYSCALL(sys_socketcall,compat_sys_socketcall)
+SYSCALL(sys_syslog,compat_sys_syslog)
+SYSCALL(sys_setitimer,compat_sys_setitimer)
+SYSCALL(sys_getitimer,compat_sys_getitimer) /* 105 */
+SYSCALL(sys_newstat,compat_sys_newstat)
+SYSCALL(sys_newlstat,compat_sys_newlstat)
+SYSCALL(sys_newfstat,compat_sys_newfstat)
+NI_SYSCALL /* old uname syscall */
+SYSCALL(sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */
+SYSCALL(sys_vhangup,sys_vhangup)
+NI_SYSCALL /* old "idle" system call */
+NI_SYSCALL /* vm86old for i386 */
+SYSCALL(sys_wait4,compat_sys_wait4)
+SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */
+SYSCALL(sys_sysinfo,compat_sys_sysinfo)
+SYSCALL(sys_s390_ipc,compat_sys_s390_ipc)
+SYSCALL(sys_fsync,compat_sys_fsync)
+SYSCALL(sys_sigreturn,compat_sys_sigreturn)
+SYSCALL(sys_clone,compat_sys_clone) /* 120 */
+SYSCALL(sys_setdomainname,compat_sys_setdomainname)
+SYSCALL(sys_newuname,compat_sys_newuname)
+NI_SYSCALL /* modify_ldt for i386 */
+SYSCALL(sys_adjtimex,compat_sys_adjtimex)
+SYSCALL(sys_mprotect,compat_sys_mprotect) /* 125 */
+SYSCALL(sys_sigprocmask,compat_sys_sigprocmask)
+NI_SYSCALL /* old "create module" */
+SYSCALL(sys_init_module,compat_sys_init_module)
+SYSCALL(sys_delete_module,compat_sys_delete_module)
+NI_SYSCALL /* 130: old get_kernel_syms */
+SYSCALL(sys_quotactl,compat_sys_quotactl)
+SYSCALL(sys_getpgid,compat_sys_getpgid)
+SYSCALL(sys_fchdir,compat_sys_fchdir)
+SYSCALL(sys_bdflush,compat_sys_bdflush)
+SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */
+SYSCALL(sys_s390_personality,compat_sys_s390_personality)
+NI_SYSCALL /* for afs_syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */
+SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */
+SYSCALL(sys_getdents,compat_sys_getdents)
+SYSCALL(sys_select,compat_sys_select)
+SYSCALL(sys_flock,compat_sys_flock)
+SYSCALL(sys_msync,compat_sys_msync)
+SYSCALL(sys_readv,compat_sys_readv) /* 145 */
+SYSCALL(sys_writev,compat_sys_writev)
+SYSCALL(sys_getsid,compat_sys_getsid)
+SYSCALL(sys_fdatasync,compat_sys_fdatasync)
+SYSCALL(sys_sysctl,compat_sys_sysctl)
+SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */
+SYSCALL(sys_munlock,compat_sys_munlock)
+SYSCALL(sys_mlockall,compat_sys_mlockall)
+SYSCALL(sys_munlockall,sys_munlockall)
+SYSCALL(sys_sched_setparam,compat_sys_sched_setparam)
+SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */
+SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler)
+SYSCALL(sys_sched_getscheduler,compat_sys_sched_getscheduler)
+SYSCALL(sys_sched_yield,sys_sched_yield)
+SYSCALL(sys_sched_get_priority_max,compat_sys_sched_get_priority_max)
+SYSCALL(sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */
+SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
+SYSCALL(sys_nanosleep,compat_sys_nanosleep)
+SYSCALL(sys_mremap,compat_sys_mremap)
+SYSCALL(sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */
+NI_SYSCALL /* for vm86 */
+NI_SYSCALL /* old sys_query_module */
+SYSCALL(sys_poll,compat_sys_poll)
+NI_SYSCALL /* old nfsservctl */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */
+SYSCALL(sys_prctl,compat_sys_prctl)
+SYSCALL(sys_rt_sigreturn,compat_sys_rt_sigreturn)
+SYSCALL(sys_rt_sigaction,compat_sys_rt_sigaction)
+SYSCALL(sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */
+SYSCALL(sys_rt_sigpending,compat_sys_rt_sigpending)
+SYSCALL(sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait)
+SYSCALL(sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo)
+SYSCALL(sys_rt_sigsuspend,compat_sys_rt_sigsuspend)
+SYSCALL(sys_pread64,compat_sys_s390_pread64) /* 180 */
+SYSCALL(sys_pwrite64,compat_sys_s390_pwrite64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */
+SYSCALL(sys_getcwd,compat_sys_getcwd)
+SYSCALL(sys_capget,compat_sys_capget)
+SYSCALL(sys_capset,compat_sys_capset) /* 185 */
+SYSCALL(sys_sigaltstack,compat_sys_sigaltstack)
+SYSCALL(sys_sendfile64,compat_sys_sendfile)
+NI_SYSCALL /* streams1 */
+NI_SYSCALL /* streams2 */
+SYSCALL(sys_vfork,sys_vfork) /* 190 */
+SYSCALL(sys_getrlimit,compat_sys_getrlimit)
+SYSCALL(sys_mmap2,compat_sys_s390_mmap2)
+SYSCALL(sys_ni_syscall,compat_sys_s390_truncate64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_ftruncate64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_stat64) /* 195 */
+SYSCALL(sys_ni_syscall,compat_sys_s390_lstat64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_fstat64)
+SYSCALL(sys_lchown,compat_sys_lchown)
+SYSCALL(sys_getuid,sys_getuid)
+SYSCALL(sys_getgid,sys_getgid) /* 200 */
+SYSCALL(sys_geteuid,sys_geteuid)
+SYSCALL(sys_getegid,sys_getegid)
+SYSCALL(sys_setreuid,compat_sys_setreuid)
+SYSCALL(sys_setregid,compat_sys_setregid)
+SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */
+SYSCALL(sys_setgroups,compat_sys_setgroups)
+SYSCALL(sys_fchown,compat_sys_fchown)
+SYSCALL(sys_setresuid,compat_sys_setresuid)
+SYSCALL(sys_getresuid,compat_sys_getresuid)
+SYSCALL(sys_setresgid,compat_sys_setresgid) /* 210 */
+SYSCALL(sys_getresgid,compat_sys_getresgid)
+SYSCALL(sys_chown,compat_sys_chown)
+SYSCALL(sys_setuid,compat_sys_setuid)
+SYSCALL(sys_setgid,compat_sys_setgid)
+SYSCALL(sys_setfsuid,compat_sys_setfsuid) /* 215 */
+SYSCALL(sys_setfsgid,compat_sys_setfsgid)
+SYSCALL(sys_pivot_root,compat_sys_pivot_root)
+SYSCALL(sys_mincore,compat_sys_mincore)
+SYSCALL(sys_madvise,compat_sys_madvise)
+SYSCALL(sys_getdents64,compat_sys_getdents64) /* 220 */
+SYSCALL(sys_ni_syscall,compat_sys_fcntl64)
+SYSCALL(sys_readahead,compat_sys_s390_readahead)
+SYSCALL(sys_ni_syscall,compat_sys_sendfile64)
+SYSCALL(sys_setxattr,compat_sys_setxattr)
+SYSCALL(sys_lsetxattr,compat_sys_lsetxattr) /* 225 */
+SYSCALL(sys_fsetxattr,compat_sys_fsetxattr)
+SYSCALL(sys_getxattr,compat_sys_getxattr)
+SYSCALL(sys_lgetxattr,compat_sys_lgetxattr)
+SYSCALL(sys_fgetxattr,compat_sys_fgetxattr)
+SYSCALL(sys_listxattr,compat_sys_listxattr) /* 230 */
+SYSCALL(sys_llistxattr,compat_sys_llistxattr)
+SYSCALL(sys_flistxattr,compat_sys_flistxattr)
+SYSCALL(sys_removexattr,compat_sys_removexattr)
+SYSCALL(sys_lremovexattr,compat_sys_lremovexattr)
+SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */
+SYSCALL(sys_gettid,sys_gettid)
+SYSCALL(sys_tkill,compat_sys_tkill)
+SYSCALL(sys_futex,compat_sys_futex)
+SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity)
+SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */
+SYSCALL(sys_tgkill,compat_sys_tgkill)
+NI_SYSCALL /* reserved for TUX */
+SYSCALL(sys_io_setup,compat_sys_io_setup)
+SYSCALL(sys_io_destroy,compat_sys_io_destroy)
+SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */
+SYSCALL(sys_io_submit,compat_sys_io_submit)
+SYSCALL(sys_io_cancel,compat_sys_io_cancel)
+SYSCALL(sys_exit_group,compat_sys_exit_group)
+SYSCALL(sys_epoll_create,compat_sys_epoll_create)
+SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */
+SYSCALL(sys_epoll_wait,compat_sys_epoll_wait)
+SYSCALL(sys_set_tid_address,compat_sys_set_tid_address)
+SYSCALL(sys_fadvise64_64,compat_sys_s390_fadvise64)
+SYSCALL(sys_timer_create,compat_sys_timer_create)
+SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */
+SYSCALL(sys_timer_gettime,compat_sys_timer_gettime)
+SYSCALL(sys_timer_getoverrun,compat_sys_timer_getoverrun)
+SYSCALL(sys_timer_delete,compat_sys_timer_delete)
+SYSCALL(sys_clock_settime,compat_sys_clock_settime)
+SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */
+SYSCALL(sys_clock_getres,compat_sys_clock_getres)
+SYSCALL(sys_clock_nanosleep,compat_sys_clock_nanosleep)
+NI_SYSCALL /* reserved for vserver */
+SYSCALL(sys_ni_syscall,compat_sys_s390_fadvise64_64)
+SYSCALL(sys_statfs64,compat_sys_statfs64)
+SYSCALL(sys_fstatfs64,compat_sys_fstatfs64)
+SYSCALL(sys_remap_file_pages,compat_sys_remap_file_pages)
+NI_SYSCALL /* 268 sys_mbind */
+NI_SYSCALL /* 269 sys_get_mempolicy */
+NI_SYSCALL /* 270 sys_set_mempolicy */
+SYSCALL(sys_mq_open,compat_sys_mq_open)
+SYSCALL(sys_mq_unlink,compat_sys_mq_unlink)
+SYSCALL(sys_mq_timedsend,compat_sys_mq_timedsend)
+SYSCALL(sys_mq_timedreceive,compat_sys_mq_timedreceive)
+SYSCALL(sys_mq_notify,compat_sys_mq_notify) /* 275 */
+SYSCALL(sys_mq_getsetattr,compat_sys_mq_getsetattr)
+SYSCALL(sys_kexec_load,compat_sys_kexec_load)
+SYSCALL(sys_add_key,compat_sys_add_key)
+SYSCALL(sys_request_key,compat_sys_request_key)
+SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */
+SYSCALL(sys_waitid,compat_sys_waitid)
+SYSCALL(sys_ioprio_set,compat_sys_ioprio_set)
+SYSCALL(sys_ioprio_get,compat_sys_ioprio_get)
+SYSCALL(sys_inotify_init,sys_inotify_init)
+SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */
+SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
+NI_SYSCALL /* 287 sys_migrate_pages */
+SYSCALL(sys_openat,compat_sys_openat)
+SYSCALL(sys_mkdirat,compat_sys_mkdirat)
+SYSCALL(sys_mknodat,compat_sys_mknodat) /* 290 */
+SYSCALL(sys_fchownat,compat_sys_fchownat)
+SYSCALL(sys_futimesat,compat_sys_futimesat)
+SYSCALL(sys_newfstatat,compat_sys_s390_fstatat64)
+SYSCALL(sys_unlinkat,compat_sys_unlinkat)
+SYSCALL(sys_renameat,compat_sys_renameat) /* 295 */
+SYSCALL(sys_linkat,compat_sys_linkat)
+SYSCALL(sys_symlinkat,compat_sys_symlinkat)
+SYSCALL(sys_readlinkat,compat_sys_readlinkat)
+SYSCALL(sys_fchmodat,compat_sys_fchmodat)
+SYSCALL(sys_faccessat,compat_sys_faccessat) /* 300 */
+SYSCALL(sys_pselect6,compat_sys_pselect6)
+SYSCALL(sys_ppoll,compat_sys_ppoll)
+SYSCALL(sys_unshare,compat_sys_unshare)
+SYSCALL(sys_set_robust_list,compat_sys_set_robust_list)
+SYSCALL(sys_get_robust_list,compat_sys_get_robust_list)
+SYSCALL(sys_splice,compat_sys_splice)
+SYSCALL(sys_sync_file_range,compat_sys_s390_sync_file_range)
+SYSCALL(sys_tee,compat_sys_tee)
+SYSCALL(sys_vmsplice,compat_sys_vmsplice)
+NI_SYSCALL /* 310 sys_move_pages */
+SYSCALL(sys_getcpu,compat_sys_getcpu)
+SYSCALL(sys_epoll_pwait,compat_sys_epoll_pwait)
+SYSCALL(sys_utimes,compat_sys_utimes)
+SYSCALL(sys_fallocate,compat_sys_s390_fallocate)
+SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */
+SYSCALL(sys_signalfd,compat_sys_signalfd)
NI_SYSCALL /* 317 old sys_timer_fd */
-SYSCALL(sys_eventfd,sys_eventfd,compat_sys_eventfd)
-SYSCALL(sys_timerfd_create,sys_timerfd_create,compat_sys_timerfd_create)
-SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
-SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime)
-SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4)
-SYSCALL(sys_eventfd2,sys_eventfd2,compat_sys_eventfd2)
-SYSCALL(sys_inotify_init1,sys_inotify_init1,compat_sys_inotify_init1)
-SYSCALL(sys_pipe2,sys_pipe2,compat_sys_pipe2) /* 325 */
-SYSCALL(sys_dup3,sys_dup3,compat_sys_dup3)
-SYSCALL(sys_epoll_create1,sys_epoll_create1,compat_sys_epoll_create1)
-SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv)
-SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev)
-SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
-SYSCALL(sys_perf_event_open,sys_perf_event_open,compat_sys_perf_event_open)
-SYSCALL(sys_fanotify_init,sys_fanotify_init,compat_sys_fanotify_init)
-SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark)
-SYSCALL(sys_prlimit64,sys_prlimit64,compat_sys_prlimit64)
-SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
-SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
-SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime)
-SYSCALL(sys_syncfs,sys_syncfs,compat_sys_syncfs)
-SYSCALL(sys_setns,sys_setns,compat_sys_setns)
-SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
-SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev)
-SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,compat_sys_s390_runtime_instr)
-SYSCALL(sys_kcmp,sys_kcmp,compat_sys_kcmp)
-SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module)
-SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
-SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr)
-SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2)
-SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp)
-SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom)
-SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
-SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
-SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
-SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
-SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
+SYSCALL(sys_eventfd,compat_sys_eventfd)
+SYSCALL(sys_timerfd_create,compat_sys_timerfd_create)
+SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
+SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime)
+SYSCALL(sys_signalfd4,compat_sys_signalfd4)
+SYSCALL(sys_eventfd2,compat_sys_eventfd2)
+SYSCALL(sys_inotify_init1,compat_sys_inotify_init1)
+SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */
+SYSCALL(sys_dup3,compat_sys_dup3)
+SYSCALL(sys_epoll_create1,compat_sys_epoll_create1)
+SYSCALL(sys_preadv,compat_sys_preadv)
+SYSCALL(sys_pwritev,compat_sys_pwritev)
+SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
+SYSCALL(sys_perf_event_open,compat_sys_perf_event_open)
+SYSCALL(sys_fanotify_init,compat_sys_fanotify_init)
+SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark)
+SYSCALL(sys_prlimit64,compat_sys_prlimit64)
+SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
+SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at)
+SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime)
+SYSCALL(sys_syncfs,compat_sys_syncfs)
+SYSCALL(sys_setns,compat_sys_setns)
+SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
+SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev)
+SYSCALL(sys_s390_runtime_instr,compat_sys_s390_runtime_instr)
+SYSCALL(sys_kcmp,compat_sys_kcmp)
+SYSCALL(sys_finit_module,compat_sys_finit_module)
+SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
+SYSCALL(sys_sched_getattr,compat_sys_sched_getattr)
+SYSCALL(sys_renameat2,compat_sys_renameat2)
+SYSCALL(sys_seccomp,compat_sys_seccomp)
+SYSCALL(sys_getrandom,compat_sys_getrandom)
+SYSCALL(sys_memfd_create,compat_sys_memfd_create) /* 350 */
+SYSCALL(sys_bpf,compat_sys_bpf)
+SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
+SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,compat_sys_execveat)
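
With the 31-bit column gone, each SYSCALL() line now pairs the 64-bit entry point with its compat wrapper, and the same list is expanded twice with different SYSCALL definitions to build the native and emulation dispatch tables. A minimal hosted C sketch of that X-macro pattern — the handlers are hypothetical stand-ins, and the real tables are of course generated in assembler, with the compat one conventionally named sys_call_table_emu:

#include <stdio.h>

/* hypothetical stand-ins for the real entry points */
static long sys_symlink(void)         { return 1; }
static long compat_sys_symlink(void)  { return 2; }
static long sys_readlink(void)        { return 3; }
static long compat_sys_readlink(void) { return 4; }

/* one list, expanded twice -- mirrors how syscalls.S is included */
#define SYSCALL_LIST(SYSCALL)				\
	SYSCALL(sys_symlink,  compat_sys_symlink)	\
	SYSCALL(sys_readlink, compat_sys_readlink)

#define NATIVE(native, compat) native,
#define COMPAT(native, compat) compat,

static long (*sys_call_table[])(void)     = { SYSCALL_LIST(NATIVE) };
static long (*sys_call_table_emu[])(void) = { SYSCALL_LIST(COMPAT) };

int main(void)
{
	/* a 31-bit (compat) task is dispatched through the _emu table */
	printf("%ld %ld\n", sys_call_table[1](), sys_call_table_emu[1]());
	return 0;
}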
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 20660dd..170ddd2 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -215,20 +215,20 @@ void update_vsyscall(struct timekeeper *tk)
{
u64 nsecps;
- if (tk->tkr.clock != &clocksource_tod)
+ if (tk->tkr_mono.clock != &clocksource_tod)
return;
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_wmb();
- vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
+ vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
+ vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
vdso_data->wtom_clock_sec =
tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
- vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
- + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
- nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
+ vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
+ + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
+ nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
while (vdso_data->wtom_clock_nsec >= nsecps) {
vdso_data->wtom_clock_nsec -= nsecps;
vdso_data->wtom_clock_sec++;
@@ -236,7 +236,7 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec =
- (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+ (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
vdso_data->wtom_coarse_sec =
vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
vdso_data->wtom_coarse_nsec =
@@ -246,8 +246,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtom_coarse_sec++;
}
- vdso_data->tk_mult = tk->tkr.mult;
- vdso_data->tk_shift = tk->tkr.shift;
+ vdso_data->tk_mult = tk->tkr_mono.mult;
+ vdso_data->tk_shift = tk->tkr_mono.shift;
smp_wmb();
++vdso_data->tb_update_count;
}
@@ -283,7 +283,7 @@ void __init time_init(void)
if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
panic("Couldn't request external interrupt 0x1406");
- if (clocksource_register(&clocksource_tod) != 0)
+ if (__clocksource_register(&clocksource_tod) != 0)
panic("Could not register TOD clock source");
/* Enable TOD clock interrupts on the boot cpu. */
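
The vdso keeps the wall-to-monotonic nanoseconds in the clocksource's shifted fixed-point format, so one "second" is NSEC_PER_SEC << shift, and the normalization loop in update_vsyscall() above carries overflow into the seconds field. A standalone sketch of the same arithmetic, with an assumed example shift value rather than a real clocksource:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned int shift = 4;			/* example clocksource shift */
	uint64_t sec  = 10;
	/* 2.5 seconds worth of shifted nanoseconds */
	uint64_t nsec = (5ULL * NSEC_PER_SEC / 2) << shift;
	uint64_t nsecps = NSEC_PER_SEC << shift; /* one second, shifted */

	while (nsec >= nsecps) {		/* same loop as update_vsyscall() */
		nsec -= nsecps;
		sec++;
	}
	printf("%llu s + %llu shifted ns\n",
	       (unsigned long long)sec, (unsigned long long)nsec);
	return 0;
}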
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 14da43b..5728c5b 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -421,7 +421,7 @@ int topology_cpu_init(struct cpu *cpu)
return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}
-const struct cpumask *cpu_thread_mask(int cpu)
+static const struct cpumask *cpu_thread_mask(int cpu)
{
return &per_cpu(cpu_topology, cpu).thread_mask;
}
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index f081cf1..4d96c9f 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -26,7 +26,6 @@ int show_unhandled_signals = 1;
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
-#ifdef CONFIG_64BIT
unsigned long address;
if (regs->int_code & 0x200)
@@ -35,10 +34,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
address = regs->psw.addr;
return (void __user *)
((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
-#else
- return (void __user *)
- ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
-#endif
}
static inline void report_user_fault(struct pt_regs *regs, int signr)
@@ -153,11 +148,8 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
"privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
"special operation exception")
-
-#ifdef CONFIG_64BIT
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
"transaction constraint exception")
-#endif
static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
@@ -182,7 +174,7 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc)
void translation_exception(struct pt_regs *regs)
{
/* May never happen. */
- die(regs, "Translation exception");
+ panic("Translation exception");
}
void illegal_op(struct pt_regs *regs)
@@ -211,29 +203,6 @@ void illegal_op(struct pt_regs *regs)
} else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
is_uprobe_insn = 1;
#endif
-#ifdef CONFIG_MATHEMU
- } else if (opcode[0] == 0xb3) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_b3(opcode, regs);
- } else if (opcode[0] == 0xed) {
- if (get_user(*((__u32 *) (opcode+2)),
- (__u32 __user *)(location+1)))
- return;
- signal = math_emu_ed(opcode, regs);
- } else if (*((__u16 *) opcode) == 0xb299) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_srnm(opcode, regs);
- } else if (*((__u16 *) opcode) == 0xb29c) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_stfpc(opcode, regs);
- } else if (*((__u16 *) opcode) == 0xb29d) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_lfpc(opcode, regs);
-#endif
} else
signal = SIGILL;
}
@@ -247,71 +216,14 @@ void illegal_op(struct pt_regs *regs)
3, SIGTRAP) != NOTIFY_STOP)
signal = SIGILL;
}
-
-#ifdef CONFIG_MATHEMU
- if (signal == SIGFPE)
- do_fp_trap(regs, current->thread.fp_regs.fpc);
- else if (signal == SIGSEGV)
- do_trap(regs, signal, SEGV_MAPERR, "user address fault");
- else
-#endif
if (signal)
do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
NOKPROBE_SYMBOL(illegal_op);
-#ifdef CONFIG_MATHEMU
-void specification_exception(struct pt_regs *regs)
-{
- __u8 opcode[6];
- __u16 __user *location = NULL;
- int signal = 0;
-
- location = (__u16 __user *) get_trap_ip(regs);
-
- if (user_mode(regs)) {
- get_user(*((__u16 *) opcode), location);
- switch (opcode[0]) {
- case 0x28: /* LDR Rx,Ry */
- signal = math_emu_ldr(opcode);
- break;
- case 0x38: /* LER Rx,Ry */
- signal = math_emu_ler(opcode);
- break;
- case 0x60: /* STD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_std(opcode, regs);
- break;
- case 0x68: /* LD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ld(opcode, regs);
- break;
- case 0x70: /* STE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ste(opcode, regs);
- break;
- case 0x78: /* LE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_le(opcode, regs);
- break;
- default:
- signal = SIGILL;
- break;
- }
- } else
- signal = SIGILL;
-
- if (signal == SIGFPE)
- do_fp_trap(regs, current->thread.fp_regs.fpc);
- else if (signal)
- do_trap(regs, signal, ILL_ILLOPN, "specification exception");
-}
-#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
-#endif
-#ifdef CONFIG_64BIT
int alloc_vector_registers(struct task_struct *tsk)
{
__vector128 *vxrs;
@@ -377,7 +289,6 @@ static int __init disable_vector_extension(char *str)
return 1;
}
__setup("novx", disable_vector_extension);
-#endif
void data_exception(struct pt_regs *regs)
{
@@ -386,65 +297,7 @@ void data_exception(struct pt_regs *regs)
location = get_trap_ip(regs);
- if (MACHINE_HAS_IEEE)
- asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
-
-#ifdef CONFIG_MATHEMU
- else if (user_mode(regs)) {
- __u8 opcode[6];
- get_user(*((__u16 *) opcode), location);
- switch (opcode[0]) {
- case 0x28: /* LDR Rx,Ry */
- signal = math_emu_ldr(opcode);
- break;
- case 0x38: /* LER Rx,Ry */
- signal = math_emu_ler(opcode);
- break;
- case 0x60: /* STD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_std(opcode, regs);
- break;
- case 0x68: /* LD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ld(opcode, regs);
- break;
- case 0x70: /* STE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ste(opcode, regs);
- break;
- case 0x78: /* LE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_le(opcode, regs);
- break;
- case 0xb3:
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_b3(opcode, regs);
- break;
- case 0xed:
- get_user(*((__u32 *) (opcode+2)),
- (__u32 __user *)(location+1));
- signal = math_emu_ed(opcode, regs);
- break;
- case 0xb2:
- if (opcode[1] == 0x99) {
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_srnm(opcode, regs);
- } else if (opcode[1] == 0x9c) {
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_stfpc(opcode, regs);
- } else if (opcode[1] == 0x9d) {
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_lfpc(opcode, regs);
- } else
- signal = SIGILL;
- break;
- default:
- signal = SIGILL;
- break;
- }
- }
-#endif
-#ifdef CONFIG_64BIT
+ asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
/* Check for vector register enablement */
if (MACHINE_HAS_VX && !current->thread.vxrs &&
(current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
@@ -454,13 +307,11 @@ void data_exception(struct pt_regs *regs)
clear_pt_regs_flag(regs, PIF_PER_TRAP);
return;
}
-#endif
-
if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
signal = SIGFPE;
else
signal = SIGILL;
- if (signal == SIGFPE)
+ if (signal == SIGFPE)
do_fp_trap(regs, current->thread.fp_regs.fpc);
else if (signal)
do_trap(regs, signal, ILL_ILLOPN, "data exception");
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index cc73280..66956c0 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -188,7 +188,9 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
else if (put_user(*(input), __ptr)) \
__rc = EMU_ADDRESSING; \
if (__rc == 0) \
- sim_stor_event(regs, __ptr, mask + 1); \
+ sim_stor_event(regs, \
+ (void __force *)__ptr, \
+ mask + 1); \
__rc; \
})
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 0bbb7e0..0d58269 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -32,19 +32,17 @@
#include <asm/vdso.h>
#include <asm/facility.h>
-#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif
-#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
-#endif /* CONFIG_64BIT */
/*
* Should the kernel map a VDSO page into processes and pass its
@@ -87,7 +85,6 @@ static void vdso_init_data(struct vdso_data *vd)
vd->ectg_available = test_facility(31);
}
-#ifdef CONFIG_64BIT
/*
* Allocate/free per cpu vdso data.
*/
@@ -169,7 +166,6 @@ static void vdso_init_cr5(void)
cr5 = offsetof(struct _lowcore, paste);
__ctl_load(cr5, 5, 5);
}
-#endif /* CONFIG_64BIT */
/*
* This is called from binfmt_elf, we create the special vma for the
@@ -191,7 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (!uses_interp)
return 0;
-#ifdef CONFIG_64BIT
vdso_pagelist = vdso64_pagelist;
vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
@@ -200,11 +195,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_pages = vdso32_pages;
}
#endif
-#else
- vdso_pagelist = vdso32_pagelist;
- vdso_pages = vdso32_pages;
-#endif
-
/*
* vDSO has a problem and was disabled, just don't "enable" it for
* the process
@@ -268,7 +258,7 @@ static int __init vdso_init(void)
if (!vdso_enabled)
return 0;
vdso_init_data(vdso_data);
-#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
/* Calculate the size of the 32 bit vDSO */
vdso32_pages = ((&vdso32_end - &vdso32_start
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
@@ -287,7 +277,6 @@ static int __init vdso_init(void)
vdso32_pagelist[vdso32_pages] = NULL;
#endif
-#ifdef CONFIG_64BIT
/* Calculate the size of the 64 bit vDSO */
vdso64_pages = ((&vdso64_end - &vdso64_start
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
@@ -307,7 +296,6 @@ static int __init vdso_init(void)
if (vdso_alloc_per_cpu(&S390_lowcore))
BUG();
vdso_init_cr5();
-#endif /* CONFIG_64BIT */
get_page(virt_to_page(vdso_data));
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 35b13ed..445657f 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -6,17 +6,10 @@
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
-#ifndef CONFIG_64BIT
-OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
-OUTPUT_ARCH(s390:31-bit)
-ENTRY(startup)
-jiffies = jiffies_64 + 4;
-#else
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
ENTRY(startup)
jiffies = jiffies_64;
-#endif
PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 9254aff..fc7ec95 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
+ rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
@@ -213,7 +213,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
* - gpr 3 contains the virtqueue index (passed as datamatch)
* - gpr 4 contains the index on the bus (optionally)
*/
- ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
+ ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
vcpu->run->s.regs.gprs[2] & 0xffffffff,
8, &vcpu->run->s.regs.gprs[3],
vcpu->run->s.regs.gprs[4]);
@@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
- int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
+ int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 267523c..a7559f7 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -10,6 +10,7 @@
#include <asm/pgtable.h>
#include "kvm-s390.h"
#include "gaccess.h"
+#include <asm/switch_to.h>
union asce {
unsigned long val;
@@ -207,6 +208,54 @@ union raddress {
unsigned long pfra : 52; /* Page-Frame Real Address */
};
+union alet {
+ u32 val;
+ struct {
+ u32 reserved : 7;
+ u32 p : 1;
+ u32 alesn : 8;
+ u32 alen : 16;
+ };
+};
+
+union ald {
+ u32 val;
+ struct {
+ u32 : 1;
+ u32 alo : 24;
+ u32 all : 7;
+ };
+};
+
+struct ale {
+ unsigned long i : 1; /* ALEN-Invalid Bit */
+ unsigned long : 5;
+ unsigned long fo : 1; /* Fetch-Only Bit */
+ unsigned long p : 1; /* Private Bit */
+ unsigned long alesn : 8; /* Access-List-Entry Sequence Number */
+ unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
+ unsigned long : 32;
+ unsigned long : 1;
+ unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
+ unsigned long : 6;
+ unsigned long astesn : 32; /* ASTE Sequence Number */
+} __packed;
+
+struct aste {
+ unsigned long i : 1; /* ASX-Invalid Bit */
+ unsigned long ato : 29; /* Authority-Table Origin */
+ unsigned long : 1;
+ unsigned long b : 1; /* Base-Space Bit */
+ unsigned long ax : 16; /* Authorization Index */
+ unsigned long atl : 12; /* Authority-Table Length */
+ unsigned long : 2;
+ unsigned long ca : 1; /* Controlled-ASN Bit */
+ unsigned long ra : 1; /* Reusable-ASN Bit */
+ unsigned long asce : 64; /* Address-Space-Control Element */
+ unsigned long ald : 32;
+ unsigned long astesn : 32;
+ /* .. more fields there */
+} __packed;
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
@@ -307,15 +356,157 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
ipte_unlock_simple(vcpu);
}
-static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+ int write)
+{
+ union alet alet;
+ struct ale ale;
+ struct aste aste;
+ unsigned long ald_addr, authority_table_addr;
+ union ald ald;
+ int eax, rc;
+ u8 authority_table;
+
+ if (ar >= NUM_ACRS)
+ return -EINVAL;
+
+ save_access_regs(vcpu->run->s.regs.acrs);
+ alet.val = vcpu->run->s.regs.acrs[ar];
+
+ if (ar == 0 || alet.val == 0) {
+ asce->val = vcpu->arch.sie_block->gcr[1];
+ return 0;
+ } else if (alet.val == 1) {
+ asce->val = vcpu->arch.sie_block->gcr[7];
+ return 0;
+ }
+
+ if (alet.reserved)
+ return PGM_ALET_SPECIFICATION;
+
+ if (alet.p)
+ ald_addr = vcpu->arch.sie_block->gcr[5];
+ else
+ ald_addr = vcpu->arch.sie_block->gcr[2];
+ ald_addr &= 0x7fffffc0;
+
+ rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
+ if (rc)
+ return rc;
+
+ if (alet.alen / 8 > ald.all)
+ return PGM_ALEN_TRANSLATION;
+
+ if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
+ return PGM_ADDRESSING;
+
+ rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
+ sizeof(struct ale));
+ if (rc)
+ return rc;
+
+ if (ale.i == 1)
+ return PGM_ALEN_TRANSLATION;
+ if (ale.alesn != alet.alesn)
+ return PGM_ALE_SEQUENCE;
+
+ rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
+ if (rc)
+ return rc;
+
+ if (aste.i)
+ return PGM_ASTE_VALIDITY;
+ if (aste.astesn != ale.astesn)
+ return PGM_ASTE_SEQUENCE;
+
+ if (ale.p == 1) {
+ eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
+ if (ale.aleax != eax) {
+ if (eax / 16 > aste.atl)
+ return PGM_EXTENDED_AUTHORITY;
+
+ authority_table_addr = aste.ato * 4 + eax / 4;
+
+ rc = read_guest_real(vcpu, authority_table_addr,
+ &authority_table,
+ sizeof(u8));
+ if (rc)
+ return rc;
+
+ if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
+ return PGM_EXTENDED_AUTHORITY;
+ }
+ }
+
+ if (ale.fo == 1 && write)
+ return PGM_PROTECTION;
+
+ asce->val = aste.asce;
+ return 0;
+}
+
+struct trans_exc_code_bits {
+ unsigned long addr : 52; /* Translation-exception Address */
+ unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
+ unsigned long : 6;
+ unsigned long b60 : 1;
+ unsigned long b61 : 1;
+ unsigned long as : 2; /* ASCE Identifier */
+};
+
+enum {
+ FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
+ FSI_STORE = 1, /* Exception was due to store operation */
+ FSI_FETCH = 2 /* Exception was due to fetch operation */
+};
+
+static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
+ ar_t ar, int write)
{
+ int rc;
+ psw_t *psw = &vcpu->arch.sie_block->gpsw;
+ struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
+ struct trans_exc_code_bits *tec_bits;
+
+ memset(pgm, 0, sizeof(*pgm));
+ tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+ tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
+ tec_bits->as = psw_bits(*psw).as;
+
+ if (!psw_bits(*psw).t) {
+ asce->val = 0;
+ asce->r = 1;
+ return 0;
+ }
+
switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
case PSW_AS_PRIMARY:
- return vcpu->arch.sie_block->gcr[1];
+ asce->val = vcpu->arch.sie_block->gcr[1];
+ return 0;
case PSW_AS_SECONDARY:
- return vcpu->arch.sie_block->gcr[7];
+ asce->val = vcpu->arch.sie_block->gcr[7];
+ return 0;
case PSW_AS_HOME:
- return vcpu->arch.sie_block->gcr[13];
+ asce->val = vcpu->arch.sie_block->gcr[13];
+ return 0;
+ case PSW_AS_ACCREG:
+ rc = ar_translation(vcpu, asce, ar, write);
+ switch (rc) {
+ case PGM_ALEN_TRANSLATION:
+ case PGM_ALE_SEQUENCE:
+ case PGM_ASTE_VALIDITY:
+ case PGM_ASTE_SEQUENCE:
+ case PGM_EXTENDED_AUTHORITY:
+ vcpu->arch.pgm.exc_access_id = ar;
+ break;
+ case PGM_PROTECTION:
+ tec_bits->b60 = 1;
+ tec_bits->b61 = 1;
+ break;
+ }
+ if (rc > 0)
+ pgm->code = rc;
+ return rc;
}
return 0;
}
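
ar_translation() above implements the access-register case: ALET 0 selects the primary space (CR1), ALET 1 the secondary space (CR7), and any other value is looked up in an access list before yielding the ASCE from the ASTE. A small hosted sketch of the ALET field split, using explicit shifts instead of the big-endian bitfields (z/Architecture numbers bits from the MSB, so the offsets below follow the union above; struct and function names are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* hypothetical decoded form of union alet (illustration only) */
struct alet_fields {
	uint32_t p;	/* 1: primary-space list (CR5), 0: DU list (CR2) */
	uint32_t alesn;	/* access-list-entry sequence number */
	uint32_t alen;	/* access-list-entry number */
};

static struct alet_fields alet_decode(uint32_t val)
{
	struct alet_fields f = {
		.p     = (val >> 24) & 0x1,
		.alesn = (val >> 16) & 0xff,
		.alen  = val & 0xffff,
	};
	return f;
}

int main(void)
{
	/* ALET values 0 and 1 never reach this path: ar_translation()
	 * short-circuits them to CR1 (primary) and CR7 (secondary) */
	struct alet_fields f = alet_decode(0x01230004);

	printf("p=%u alesn=%u alen=%u\n", f.p, f.alesn, f.alen);
	return 0;
}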
@@ -330,10 +521,11 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* @vcpu: virtual cpu
* @gva: guest virtual address
* @gpa: points to where guest physical (absolute) address should be stored
+ * @asce: effective asce
* @write: indicates if access is a write access
*
* Translate a guest virtual address into a guest absolute address by means
- * of dynamic address translation as specified by the architecuture.
+ * of dynamic address translation as specified by the architecture.
* If the resulting absolute address is not available in the configuration
* an addressing exception is indicated and @gpa will not be changed.
*
@@ -345,7 +537,8 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* by the architecture
*/
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
- unsigned long *gpa, int write)
+ unsigned long *gpa, const union asce asce,
+ int write)
{
union vaddress vaddr = {.addr = gva};
union raddress raddr = {.addr = gva};
@@ -354,12 +547,10 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
union ctlreg0 ctlreg0;
unsigned long ptr;
int edat1, edat2;
- union asce asce;
ctlreg0.val = vcpu->arch.sie_block->gcr[0];
edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
- asce.val = get_vcpu_asce(vcpu);
if (asce.r)
goto real_address;
ptr = asce.origin * 4096;
@@ -506,48 +697,30 @@ static inline int is_low_address(unsigned long ga)
return (ga & ~0x11fful) == 0;
}
-static int low_address_protection_enabled(struct kvm_vcpu *vcpu)
+static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
+ const union asce asce)
{
union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
psw_t *psw = &vcpu->arch.sie_block->gpsw;
- union asce asce;
if (!ctlreg0.lap)
return 0;
- asce.val = get_vcpu_asce(vcpu);
if (psw_bits(*psw).t && asce.p)
return 0;
return 1;
}
-struct trans_exc_code_bits {
- unsigned long addr : 52; /* Translation-exception Address */
- unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
- unsigned long : 7;
- unsigned long b61 : 1;
- unsigned long as : 2; /* ASCE Identifier */
-};
-
-enum {
- FSI_UNKNOWN = 0, /* Unknown wether fetch or store */
- FSI_STORE = 1, /* Exception was due to store operation */
- FSI_FETCH = 2 /* Exception was due to fetch operation */
-};
-
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
unsigned long *pages, unsigned long nr_pages,
- int write)
+ const union asce asce, int write)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec_bits;
int lap_enabled, rc;
- memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
- tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
- tec_bits->as = psw_bits(*psw).as;
- lap_enabled = low_address_protection_enabled(vcpu);
+ lap_enabled = low_address_protection_enabled(vcpu, asce);
while (nr_pages) {
ga = kvm_s390_logical_to_effective(vcpu, ga);
tec_bits->addr = ga >> PAGE_SHIFT;
@@ -557,7 +730,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
}
ga &= PAGE_MASK;
if (psw_bits(*psw).t) {
- rc = guest_translate(vcpu, ga, pages, write);
+ rc = guest_translate(vcpu, ga, pages, asce, write);
if (rc < 0)
return rc;
if (rc == PGM_PROTECTION)
@@ -578,7 +751,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
return 0;
}
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len, int write)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -591,20 +764,19 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
if (!len)
return 0;
- /* Access register mode is not supported yet. */
- if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
- return -EOPNOTSUPP;
+ rc = get_vcpu_asce(vcpu, &asce, ar, write);
+ if (rc)
+ return rc;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
pages = pages_array;
if (nr_pages > ARRAY_SIZE(pages_array))
pages = vmalloc(nr_pages * sizeof(unsigned long));
if (!pages)
return -ENOMEM;
- asce.val = get_vcpu_asce(vcpu);
need_ipte_lock = psw_bits(*psw).t && !asce.r;
if (need_ipte_lock)
ipte_lock(vcpu);
- rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
+ rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
for (idx = 0; idx < nr_pages && !rc; idx++) {
gpa = *(pages + idx) + (ga & ~PAGE_MASK);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
@@ -652,7 +824,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* Note: The IPTE lock is not taken during this function, so the caller
* has to take care of this.
*/
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
unsigned long *gpa, int write)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
@@ -661,26 +833,21 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
union asce asce;
int rc;
- /* Access register mode is not supported yet. */
- if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
- return -EOPNOTSUPP;
-
gva = kvm_s390_logical_to_effective(vcpu, gva);
- memset(pgm, 0, sizeof(*pgm));
tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
- tec->as = psw_bits(*psw).as;
- tec->fsi = write ? FSI_STORE : FSI_FETCH;
+ rc = get_vcpu_asce(vcpu, &asce, ar, write);
tec->addr = gva >> PAGE_SHIFT;
- if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
+ if (rc)
+ return rc;
+ if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
if (write) {
rc = pgm->code = PGM_PROTECTION;
return rc;
}
}
- asce.val = get_vcpu_asce(vcpu);
if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
- rc = guest_translate(vcpu, gva, gpa, write);
+ rc = guest_translate(vcpu, gva, gpa, asce, write);
if (rc > 0) {
if (rc == PGM_PROTECTION)
tec->b61 = 1;
@@ -697,28 +864,51 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
}
/**
- * kvm_s390_check_low_addr_protection - check for low-address protection
- * @ga: Guest address
+ * check_gva_range - test a range of guest virtual addresses for accessibility
+ */
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+ unsigned long length, int is_write)
+{
+ unsigned long gpa;
+ unsigned long currlen;
+ int rc = 0;
+
+ ipte_lock(vcpu);
+ while (length > 0 && !rc) {
+ currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
+ rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
+ gva += currlen;
+ length -= currlen;
+ }
+ ipte_unlock(vcpu);
+
+ return rc;
+}
+
+/**
+ * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @gra: Guest real address
*
* Checks whether an address is subject to low-address protection and sets
* up vcpu->arch.pgm accordingly if necessary.
*
* Return: 0 if no protection exception, or PGM_PROTECTION if protected.
*/
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec_bits;
+ union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
- if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
+ if (!ctlreg0.lap || !is_low_address(gra))
return 0;
memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
tec_bits->fsi = FSI_STORE;
tec_bits->as = psw_bits(*psw).as;
- tec_bits->addr = ga >> PAGE_SHIFT;
+ tec_bits->addr = gra >> PAGE_SHIFT;
pgm->code = PGM_PROTECTION;
return pgm->code;
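
check_gva_range() gives instruction handlers a way to validate a whole guest buffer up front: it walks the range page by page under the ipte lock and returns 0, a positive PGM_* code, or a negative error. A sketch of a hypothetical caller (only compiles in kernel context; the helper name is assumed, not from this patch), forwarding the result the way the other handlers here do:

/* sketch: validate a guest-writable buffer before emulating an
 * instruction that stores into it */
static int check_target_buffer(struct kvm_vcpu *vcpu, unsigned long gva,
			       ar_t ar, unsigned long len)
{
	int rc;

	rc = check_gva_range(vcpu, gva, ar, len, 1);	/* 1 == write */
	if (rc)		/* inject the PGM_* or propagate the -errno */
		return kvm_s390_inject_prog_cond(vcpu, rc);
	return 0;
}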
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 0149cf1..ef03726 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -156,9 +156,11 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
}
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
- unsigned long *gpa, int write);
+ ar_t ar, unsigned long *gpa, int write);
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+ unsigned long length, int is_write);
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len, int write);
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -168,6 +170,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* write_guest - copy data from kernel space to guest space
* @vcpu: virtual cpu
* @ga: guest address
+ * @ar: access register
* @data: source address in kernel space
* @len: number of bytes to copy
*
@@ -176,8 +179,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* If DAT is off data will be copied to guest real or absolute memory.
* If DAT is on data will be copied to the address space as specified by
* the address space bits of the PSW:
- * Primary, secondory or home space (access register mode is currently not
- * implemented).
+ * Primary, secondary, home space or access register mode.
* The addressing mode of the PSW is also inspected, so that address wrap
* around is taken into account for 24-, 31- and 64-bit addressing mode,
* if the to be copied data crosses page boundaries in guest address space.
@@ -210,16 +212,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* if data has been changed in guest space in case of an exception.
*/
static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
- return access_guest(vcpu, ga, data, len, 1);
+ return access_guest(vcpu, ga, ar, data, len, 1);
}
/**
* read_guest - copy data from guest space to kernel space
* @vcpu: virtual cpu
* @ga: guest address
+ * @ar: access register
* @data: destination address in kernel space
* @len: number of bytes to copy
*
@@ -229,10 +232,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
* data will be copied from guest space to kernel space.
*/
static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
- return access_guest(vcpu, ga, data, len, 0);
+ return access_guest(vcpu, ga, ar, data, len, 0);
}
/**
@@ -330,6 +333,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
#endif /* __KVM_S390_GACCESS_H */
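
With the extra ar argument, callers now thread the number of the access register that qualified the storage operand through to get_vcpu_asce(), so access-register mode works transparently for reads and writes. A hedged sketch of a read through the inlines above (kernel-context fragment; the wrapper name is hypothetical, and instructions without an access-register operand pass 0):

/* sketch: fetch an 8-byte operand from guest memory */
static int fetch_operand(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
			 u64 *val)
{
	int rc;

	rc = read_guest(vcpu, ga, ar, val, sizeof(*val));
	if (rc)		/* > 0: program interrupt code, < 0: -errno */
		return kvm_s390_inject_prog_cond(vcpu, rc);
	return 0;
}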
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index 3e8d409..e97b345 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -191,8 +191,8 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
if (!wp_info->old_data)
return -ENOMEM;
/* try to backup the original value */
- ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
- wp_info->len);
+ ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
+ wp_info->len);
if (ret) {
kfree(wp_info->old_data);
wp_info->old_data = NULL;
@@ -362,8 +362,8 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
continue;
/* refetch the wp data and compare it to the old value */
- if (!read_guest(vcpu, wp_info->phys_addr, temp,
- wp_info->len)) {
+ if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
+ wp_info->len)) {
if (memcmp(temp, wp_info->old_data, wp_info->len)) {
kfree(temp);
return wp_info;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index bebd215..9e3779e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -165,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
break;
+ case PGM_VECTOR_PROCESSING:
case PGM_DATA:
pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
break;
@@ -319,7 +320,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
/* Make sure that the source is paged-in */
rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
- &srcaddr, 0);
+ reg2, &srcaddr, 0);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
@@ -328,7 +329,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
/* Make sure that the destination is paged-in */
rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
- &dstaddr, 1);
+ reg1, &dstaddr, 1);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 073b5f3..9de4726 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,7 +1,7 @@
/*
* handling kvm guest interrupts
*
- * Copyright IBM Corp. 2008,2014
+ * Copyright IBM Corp. 2008, 2015
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -17,9 +17,12 @@
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
+#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
+#include <asm/isc.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
@@ -32,11 +35,6 @@
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
-static int is_ioint(u64 type)
-{
- return ((type & 0xfffe0000u) != 0xfffe0000u);
-}
-
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
@@ -72,70 +70,45 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
return 1;
}
-static u64 int_word_to_isc_bits(u32 int_word)
+static int ckc_irq_pending(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.sie_block->ckc <
+ get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
+ return 0;
+ return ckc_interrupts_enabled(vcpu);
+}
+
+static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
+{
+ return !psw_extint_disabled(vcpu) &&
+ (vcpu->arch.sie_block->gcr[0] & 0x400ul);
+}
+
+static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.sie_block->cputm >> 63) &&
+ cpu_timer_interrupts_enabled(vcpu);
+}
+
+static inline int is_ioirq(unsigned long irq_type)
{
- u8 isc = (int_word & 0x38000000) >> 27;
+ return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
+ (irq_type <= IRQ_PEND_IO_ISC_7));
+}
+static uint64_t isc_to_isc_bits(int isc)
+{
return (0x80 >> isc) << 24;
}
-static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static inline u8 int_word_to_isc(u32 int_word)
{
- switch (inti->type) {
- case KVM_S390_INT_EXTERNAL_CALL:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
- return 1;
- return 0;
- case KVM_S390_INT_EMERGENCY:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
- return 1;
- return 0;
- case KVM_S390_INT_CLOCK_COMP:
- return ckc_interrupts_enabled(vcpu);
- case KVM_S390_INT_CPU_TIMER:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
- return 1;
- return 0;
- case KVM_S390_INT_SERVICE:
- case KVM_S390_INT_PFAULT_INIT:
- case KVM_S390_INT_PFAULT_DONE:
- case KVM_S390_INT_VIRTIO:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
- return 1;
- return 0;
- case KVM_S390_PROGRAM_INT:
- case KVM_S390_SIGP_STOP:
- case KVM_S390_SIGP_SET_PREFIX:
- case KVM_S390_RESTART:
- return 1;
- case KVM_S390_MCHK:
- if (psw_mchk_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
- return 1;
- return 0;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (psw_ioint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[6] &
- int_word_to_isc_bits(inti->io.io_int_word))
- return 1;
- return 0;
- default:
- printk(KERN_WARNING "illegal interrupt type %llx\n",
- inti->type);
- BUG();
- }
- return 0;
+ return (int_word & 0x38000000) >> 27;
+}
+
+static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->arch.float_int.pending_irqs;
}
static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
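
isc_to_isc_bits() maps an I/O interruption subclass to its enablement bit as tested against CR6, with ISC 0 occupying the most significant position of the eight-bit mask (0x80000000 in the 32-bit view). A standalone check of two worked values — the sketch widens before the shift so it stays free of signed overflow, but yields the same masks:

#include <assert.h>
#include <stdint.h>

/* same shape as isc_to_isc_bits() above (sketch) */
static uint64_t isc_to_isc_bits_sketch(int isc)
{
	return (uint64_t)(0x80 >> isc) << 24;
}

int main(void)
{
	assert(isc_to_isc_bits_sketch(0) == 0x80000000u);	/* ISC 0 */
	assert(isc_to_isc_bits_sketch(5) == 0x04000000u);	/* ISC 5 */
	assert(isc_to_isc_bits_sketch(7) == 0x01000000u);	/* ISC 7 */
	return 0;
}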
@@ -143,12 +116,31 @@ static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
return vcpu->arch.local_int.pending_irqs;
}
-static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
+static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
+ unsigned long active_mask)
+{
+ int i;
+
+ for (i = 0; i <= MAX_ISC; i++)
+ if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
+ active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));
+
+ return active_mask;
+}
+
+static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
- unsigned long active_mask = pending_local_irqs(vcpu);
+ unsigned long active_mask;
+
+ active_mask = pending_local_irqs(vcpu);
+ active_mask |= pending_floating_irqs(vcpu);
if (psw_extint_disabled(vcpu))
active_mask &= ~IRQ_PEND_EXT_MASK;
+ if (psw_ioint_disabled(vcpu))
+ active_mask &= ~IRQ_PEND_IO_MASK;
+ else
+ active_mask = disable_iscs(vcpu, active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
@@ -157,8 +149,13 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
+ __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
if (psw_mchk_disabled(vcpu))
active_mask &= ~IRQ_PEND_MCHK_MASK;
+ if (!(vcpu->arch.sie_block->gcr[14] &
+ vcpu->kvm->arch.float_int.mchk.cr14))
+ __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
/*
* STOP irqs will never be actively delivered. They are triggered via
@@ -200,6 +197,16 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
+static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
+{
+ if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK))
+ return;
+ else if (psw_ioint_disabled(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_IO_INT);
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR6;
+}
+
static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
@@ -226,47 +233,17 @@ static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}
-/* Set interception request for non-deliverable local interrupts */
-static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
+/* Set interception request for non-deliverable interrupts */
+static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
+ set_intercept_indicators_io(vcpu);
set_intercept_indicators_ext(vcpu);
set_intercept_indicators_mchk(vcpu);
set_intercept_indicators_stop(vcpu);
}
-static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
-{
- switch (inti->type) {
- case KVM_S390_INT_SERVICE:
- case KVM_S390_INT_PFAULT_DONE:
- case KVM_S390_INT_VIRTIO:
- if (psw_extint_disabled(vcpu))
- __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR0;
- break;
- case KVM_S390_MCHK:
- if (psw_mchk_disabled(vcpu))
- vcpu->arch.sie_block->ictl |= ICTL_LPSW;
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR14;
- break;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (psw_ioint_disabled(vcpu))
- __set_cpuflag(vcpu, CPUSTAT_IO_INT);
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR6;
- break;
- default:
- BUG();
- }
-}
-
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
- const unsigned short table[] = { 2, 4, 4, 6 };
-
switch (vcpu->arch.sie_block->icptcode) {
case ICPT_INST:
case ICPT_INSTPROGI:
@@ -274,7 +251,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
case ICPT_PARTEXEC:
case ICPT_IOINST:
/* last instruction only stored for these icptcodes */
- return table[vcpu->arch.sie_block->ipa >> 14];
+ return insn_length(vcpu->arch.sie_block->ipa >> 8);
case ICPT_PROGI:
return vcpu->arch.sie_block->pgmilc;
default:
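
The removed lookup table { 2, 4, 4, 6 } indexed by ipa >> 14 and the generic insn_length() helper encode the same architected rule: the two most significant bits of the first opcode byte determine the instruction length (00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes). A standalone sketch that checks the equivalence exhaustively, assuming insn_length() in <asm/dis.h> uses the usual closed form:

#include <assert.h>

/* closed form assumed for insn_length() (sketch) */
static int insn_length(unsigned char code)
{
	return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
	/* ipa >> 8 yields the first opcode byte, ipa >> 14 its top two bits */
	static const int table[] = { 2, 4, 4, 6 };	/* the removed table */
	unsigned int ipa;

	for (ipa = 0; ipa <= 0xffff; ipa++)
		assert(insn_length(ipa >> 8) == table[ipa >> 14]);
	return 0;
}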
@@ -350,38 +327,72 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_mchk_info mchk;
- int rc;
+ struct kvm_s390_mchk_info mchk = {};
+ unsigned long adtl_status_addr;
+ int deliver = 0;
+ int rc = 0;
+ spin_lock(&fi->lock);
spin_lock(&li->lock);
- mchk = li->irq.mchk;
+ if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
+ test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
+ /*
+ * If there was an exigent machine check pending, then any
+ * repressible machine checks that might have been pending
+ * are indicated along with it, so always clear bits for
+ * repressible and exigent interrupts
+ */
+ mchk = li->irq.mchk;
+ clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+ clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+ memset(&li->irq.mchk, 0, sizeof(mchk));
+ deliver = 1;
+ }
/*
- * If there was an exigent machine check pending, then any repressible
- * machine checks that might have been pending are indicated along
- * with it, so always clear both bits
+ * We indicate floating repressible conditions along with
+ * other pending conditions. Channel Report Pending and Channel
+ * Subsystem damage are the only two and are indicated by
+ * bits in mcic and masked in cr14.
*/
- clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
- clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
- memset(&li->irq.mchk, 0, sizeof(mchk));
+ if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
+ mchk.mcic |= fi->mchk.mcic;
+ mchk.cr14 |= fi->mchk.cr14;
+ memset(&fi->mchk, 0, sizeof(mchk));
+ deliver = 1;
+ }
spin_unlock(&li->lock);
+ spin_unlock(&fi->lock);
- VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
- mchk.mcic);
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
- mchk.cr14, mchk.mcic);
-
- rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
- rc |= put_guest_lc(vcpu, mchk.mcic,
- (u64 __user *) __LC_MCCK_CODE);
- rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
- (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
- rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
- &mchk.fixed_logout, sizeof(mchk.fixed_logout));
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (deliver) {
+ VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+ mchk.mcic);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_MCHK,
+ mchk.cr14, mchk.mcic);
+
+ rc = kvm_s390_vcpu_store_status(vcpu,
+ KVM_S390_STORE_STATUS_PREFIXED);
+ rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
+ &adtl_status_addr,
+ sizeof(unsigned long));
+ rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
+ adtl_status_addr);
+ rc |= put_guest_lc(vcpu, mchk.mcic,
+ (u64 __user *) __LC_MCCK_CODE);
+ rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
+ (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+ &mchk.fixed_logout,
+ sizeof(mchk.fixed_logout));
+ rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ }
return rc ? -EFAULT : 0;
}
@@ -484,7 +495,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_pgm_info pgm_info;
- int rc = 0;
+ int rc = 0, nullifying = false;
u16 ilc = get_ilc(vcpu);
spin_lock(&li->lock);
@@ -509,6 +520,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
case PGM_LX_TRANSLATION:
case PGM_PRIMARY_AUTHORITY:
case PGM_SECONDARY_AUTHORITY:
+ nullifying = true;
+ /* fall through */
case PGM_SPACE_SWITCH:
rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
@@ -521,6 +534,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
case PGM_EXTENDED_AUTHORITY:
rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
+ nullifying = true;
break;
case PGM_ASCE_TYPE:
case PGM_PAGE_TRANSLATION:
@@ -534,6 +548,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
(u8 *)__LC_EXC_ACCESS_ID);
rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
(u8 *)__LC_OP_ACCESS_ID);
+ nullifying = true;
break;
case PGM_MONITOR:
rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
@@ -541,6 +556,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
rc |= put_guest_lc(vcpu, pgm_info.mon_code,
(u64 *)__LC_MON_CODE);
break;
+ case PGM_VECTOR_PROCESSING:
case PGM_DATA:
rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
(u32 *)__LC_DATA_EXC_CODE);
@@ -551,6 +567,15 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
break;
+ case PGM_STACK_FULL:
+ case PGM_STACK_EMPTY:
+ case PGM_STACK_SPECIFICATION:
+ case PGM_STACK_TYPE:
+ case PGM_STACK_OPERATION:
+ case PGM_TRACE_TABEL:
+ case PGM_CRYPTO_OPERATION:
+ nullifying = true;
+ break;
}
if (pgm_info.code & PGM_PER) {
@@ -564,7 +589,12 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
(u8 *) __LC_PER_ACCESS_ID);
}
+ if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
+ kvm_s390_rewind_psw(vcpu, ilc);
+
rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
+ rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
+ (u64 *) __LC_LAST_BREAK);
rc |= put_guest_lc(vcpu, pgm_info.code,
(u16 *)__LC_PGM_INT_CODE);
rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
@@ -574,16 +604,27 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
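
Nullifying program exceptions leave the PSW pointing at the failing instruction, so when delivery happens after an instruction intercept (where the PSW was already advanced), the code above rewinds it by the instruction length first. A sketch of the rewind, assuming ilc is the length in bytes (2, 4 or 6 on s390) and ignoring the 24/31-bit addressing-mode wrap the real kvm_s390_rewind_psw() must handle:

#include <stdint.h>

static uint64_t rewind_psw_addr(uint64_t addr, unsigned int ilc)
{
	/* step back over the instruction so the guest re-executes it */
	return addr - ilc;
}
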
-static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_ext_info ext;
+ int rc = 0;
+
+ spin_lock(&fi->lock);
+ if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
+ spin_unlock(&fi->lock);
+ return 0;
+ }
+ ext = fi->srv_signal;
+ memset(&fi->srv_signal, 0, sizeof(ext));
+ clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
- inti->ext.ext_params);
+ ext.ext_params);
vcpu->stat.deliver_service_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params, 0);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
+ ext.ext_params, 0);
rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
@@ -591,106 +632,146 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ rc |= put_guest_lc(vcpu, ext.ext_params,
(u32 *)__LC_EXT_PARAMS);
+
return rc ? -EFAULT : 0;
}
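
The reworked __deliver_service() uses a snapshot-under-lock shape: copy and clear the pending payload while holding fi->lock, then perform the guest stores (which may fault) with the lock dropped. A user-space sketch of the same shape, with a pthread mutex standing in for the spinlock and simplified types:

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

struct ext_info { unsigned int ext_params; };

struct float_state {
	pthread_mutex_t lock;
	bool service_pending;
	struct ext_info srv_signal;
};

static bool take_service_signal(struct float_state *fi, struct ext_info *out)
{
	bool pending;

	pthread_mutex_lock(&fi->lock);
	pending = fi->service_pending;
	if (pending) {
		*out = fi->srv_signal;		/* snapshot the payload... */
		memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
		fi->service_pending = false;	/* ...and clear pending state */
	}
	pthread_mutex_unlock(&fi->lock);
	return pending;	/* caller delivers from *out without the lock held */
}
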
-static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_interrupt_info *inti;
+ int rc = 0;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
- KVM_S390_INT_PFAULT_DONE, 0,
- inti->ext.ext_params2);
+ spin_lock(&fi->lock);
+ inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_PFAULT_DONE, 0,
+ inti->ext.ext_params2);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_PFAULT] -= 1;
+ }
+ if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
+ clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
- rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
+ if (inti) {
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, PFAULT_DONE,
+ (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ kfree(inti);
+ }
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_interrupt_info *inti;
+ int rc = 0;
- VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
- inti->ext.ext_params, inti->ext.ext_params2);
- vcpu->stat.deliver_virtio_interrupt++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params,
- inti->ext.ext_params2);
+ spin_lock(&fi->lock);
+ inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ VCPU_EVENT(vcpu, 4,
+ "interrupt: virtio parm:%x,parm64:%llx",
+ inti->ext.ext_params, inti->ext.ext_params2);
+ vcpu->stat.deliver_virtio_interrupt++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ inti->type,
+ inti->ext.ext_params,
+ inti->ext.ext_params2);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
+ }
+ if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
+ clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
- rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
- (u32 *)__LC_EXT_PARAMS);
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
+ if (inti) {
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
+ (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ (u32 *)__LC_EXT_PARAMS);
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ kfree(inti);
+ }
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+ unsigned long irq_type)
{
- int rc;
+ struct list_head *isc_list;
+ struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_interrupt_info *inti = NULL;
+ int rc = 0;
- VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
- vcpu->stat.deliver_io_int++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- ((__u32)inti->io.subchannel_id << 16) |
- inti->io.subchannel_nr,
- ((__u64)inti->io.io_int_parm << 32) |
- inti->io.io_int_word);
-
- rc = put_guest_lc(vcpu, inti->io.subchannel_id,
- (u16 *)__LC_SUBCHANNEL_ID);
- rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
- (u16 *)__LC_SUBCHANNEL_NR);
- rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
- (u32 *)__LC_IO_INT_PARM);
- rc |= put_guest_lc(vcpu, inti->io.io_int_word,
- (u32 *)__LC_IO_INT_WORD);
- rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- return rc ? -EFAULT : 0;
-}
+ fi = &vcpu->kvm->arch.float_int;
-static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
-{
- struct kvm_s390_mchk_info *mchk = &inti->mchk;
- int rc;
+ spin_lock(&fi->lock);
+ isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
+ inti = list_first_entry_or_null(isc_list,
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+ vcpu->stat.deliver_io_int++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ inti->type,
+ ((__u32)inti->io.subchannel_id << 16) |
+ inti->io.subchannel_nr,
+ ((__u64)inti->io.io_int_parm << 32) |
+ inti->io.io_int_word);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_IO] -= 1;
+ }
+ if (list_empty(isc_list))
+ clear_bit(irq_type, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+
+ if (inti) {
+ rc = put_guest_lc(vcpu, inti->io.subchannel_id,
+ (u16 *)__LC_SUBCHANNEL_ID);
+ rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
+ (u16 *)__LC_SUBCHANNEL_NR);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
+ (u32 *)__LC_IO_INT_PARM);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_word,
+ (u32 *)__LC_IO_INT_WORD);
+ rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ kfree(inti);
+ }
- VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
- mchk->mcic);
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
- mchk->cr14, mchk->mcic);
-
- rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
- rc |= put_guest_lc(vcpu, mchk->mcic,
- (u64 __user *) __LC_MCCK_CODE);
- rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
- (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
- rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
- &mchk->fixed_logout, sizeof(mchk->fixed_logout));
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
return rc ? -EFAULT : 0;
}
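
I/O interrupts now live on eight per-ISC FIFO lists rather than one sorted list, with one pending bit per ISC that is set exactly while the corresponding list is non-empty. A sketch of that invariant as the dequeue side maintains it (constants and counters here are stand-ins):

enum { IRQ_PEND_IO_ISC_0 = 0, NR_ISCS = 8 };

struct io_state {
	unsigned long pending;	/* one bit per ISC, in priority order */
	int queued[NR_ISCS];	/* number of entries on each per-ISC list */
};

static void pop_io_irq(struct io_state *s, unsigned long irq_type)
{
	int isc = irq_type - IRQ_PEND_IO_ISC_0;	/* bit index == list index */

	if (s->queued[isc] && --s->queued[isc] == 0)
		s->pending &= ~(1UL << irq_type);
}
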
@@ -698,6 +779,7 @@ typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
static const deliver_irq_t deliver_irq_funcs[] = {
[IRQ_PEND_MCHK_EX] = __deliver_machine_check,
+ [IRQ_PEND_MCHK_REP] = __deliver_machine_check,
[IRQ_PEND_PROG] = __deliver_prog,
[IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
[IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
@@ -706,36 +788,11 @@ static const deliver_irq_t deliver_irq_funcs[] = {
[IRQ_PEND_RESTART] = __deliver_restart,
[IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
[IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
+ [IRQ_PEND_EXT_SERVICE] = __deliver_service,
+ [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done,
+ [IRQ_PEND_VIRTIO] = __deliver_virtio,
};
-static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
-{
- int rc;
-
- switch (inti->type) {
- case KVM_S390_INT_SERVICE:
- rc = __deliver_service(vcpu, inti);
- break;
- case KVM_S390_INT_PFAULT_DONE:
- rc = __deliver_pfault_done(vcpu, inti);
- break;
- case KVM_S390_INT_VIRTIO:
- rc = __deliver_virtio(vcpu, inti);
- break;
- case KVM_S390_MCHK:
- rc = __deliver_mchk_floating(vcpu, inti);
- break;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- rc = __deliver_io(vcpu, inti);
- break;
- default:
- BUG();
- }
-
- return rc;
-}
-
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
@@ -751,21 +808,9 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
- struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
- struct kvm_s390_interrupt_info *inti;
int rc;
- rc = !!deliverable_local_irqs(vcpu);
-
- if ((!rc) && atomic_read(&fi->active)) {
- spin_lock(&fi->lock);
- list_for_each_entry(inti, &fi->list, list)
- if (__interrupt_is_deliverable(vcpu, inti)) {
- rc = 1;
- break;
- }
- spin_unlock(&fi->lock);
- }
+ rc = !!deliverable_irqs(vcpu);
if (!rc && kvm_cpu_has_pending_timer(vcpu))
rc = 1;
@@ -784,12 +829,7 @@ int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- if (!(vcpu->arch.sie_block->ckc <
- get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
- return 0;
- if (!ckc_interrupts_enabled(vcpu))
- return 0;
- return 1;
+ return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
@@ -884,60 +924,45 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
- struct kvm_s390_interrupt_info *n, *inti = NULL;
deliver_irq_t func;
- int deliver;
int rc = 0;
unsigned long irq_type;
- unsigned long deliverable_irqs;
+ unsigned long irqs;
__reset_intercept_indicators(vcpu);
/* pending ckc conditions might have been invalidated */
clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
- if (kvm_cpu_has_pending_timer(vcpu))
+ if (ckc_irq_pending(vcpu))
set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+ /* pending cpu timer conditions might have been invalidated */
+ clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+ if (cpu_timer_irq_pending(vcpu))
+ set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+
do {
- deliverable_irqs = deliverable_local_irqs(vcpu);
+ irqs = deliverable_irqs(vcpu);
/* bits are in the order of interrupt priority */
- irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
+ irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
if (irq_type == IRQ_PEND_COUNT)
break;
- func = deliver_irq_funcs[irq_type];
- if (!func) {
- WARN_ON_ONCE(func == NULL);
- clear_bit(irq_type, &li->pending_irqs);
- continue;
+ if (is_ioirq(irq_type)) {
+ rc = __deliver_io(vcpu, irq_type);
+ } else {
+ func = deliver_irq_funcs[irq_type];
+ if (!func) {
+ WARN_ON_ONCE(func == NULL);
+ clear_bit(irq_type, &li->pending_irqs);
+ continue;
+ }
+ rc = func(vcpu);
}
- rc = func(vcpu);
- } while (!rc && irq_type != IRQ_PEND_COUNT);
+ if (rc)
+ break;
+ } while (!rc);
- set_intercept_indicators_local(vcpu);
-
- if (!rc && atomic_read(&fi->active)) {
- do {
- deliver = 0;
- spin_lock(&fi->lock);
- list_for_each_entry_safe(inti, n, &fi->list, list) {
- if (__interrupt_is_deliverable(vcpu, inti)) {
- list_del(&inti->list);
- fi->irq_count--;
- deliver = 1;
- break;
- }
- __set_intercept_indicator(vcpu, inti);
- }
- if (list_empty(&fi->list))
- atomic_set(&fi->active, 0);
- spin_unlock(&fi->lock);
- if (deliver) {
- rc = __deliver_floating_interrupt(vcpu, inti);
- kfree(inti);
- }
- } while (!rc && deliver);
- }
+ set_intercept_indicators(vcpu);
return rc;
}
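
The delivery loop depends on the pending bitmask being laid out in priority order, so a find-first-bit scan always yields the most urgent deliverable interrupt, and the set is recomputed after every delivery. A portable sketch of the scan, with GCC/Clang's __builtin_ctzl standing in for find_first_bit():

/* Index of the lowest set bit in [0, nbits), or nbits if none is set. */
static int first_pending(unsigned long irqs, int nbits)
{
	int bit;

	if (!irqs)
		return nbits;
	bit = __builtin_ctzl(irqs);
	return bit < nbits ? bit : nbits;
}
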
@@ -1172,80 +1197,182 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
return 0;
}
+static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
+ int isc, u32 schid)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
+ struct kvm_s390_interrupt_info *iter;
+ u16 id = (schid & 0xffff0000U) >> 16;
+ u16 nr = schid & 0x0000ffffU;
+ spin_lock(&fi->lock);
+ list_for_each_entry(iter, isc_list, list) {
+ if (schid && (id != iter->io.subchannel_id ||
+ nr != iter->io.subchannel_nr))
+ continue;
+ /* found an appropriate entry */
+ list_del_init(&iter->list);
+ fi->counters[FIRQ_CNTR_IO] -= 1;
+ if (list_empty(isc_list))
+ clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return iter;
+ }
+ spin_unlock(&fi->lock);
+ return NULL;
+}
+
+/*
+ * Dequeue and return an I/O interrupt matching any of the interruption
+ * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
+ */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
- u64 cr6, u64 schid)
+ u64 isc_mask, u32 schid)
+{
+ struct kvm_s390_interrupt_info *inti = NULL;
+ int isc;
+
+ for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
+ if (isc_mask & isc_to_isc_bits(isc))
+ inti = get_io_int(kvm, isc, schid);
+ }
+ return inti;
+}
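
kvm_s390_get_io_int() walks the ISCs from most to least important and only probes those enabled in the caller's mask. Sketches of the two conversions this relies on, written as I understand the s390 encoding (assumed equivalents of isc_to_isc_bits() and int_word_to_isc(); ISC 0 is the most important subclass and maps to the leftmost bit of the CR6 mask byte):

#include <stdint.h>

static uint64_t isc_to_bits(int isc)
{
	return (0x80ULL >> isc) << 24;	/* bits 24..31 carry the ISC mask */
}

static int isc_of_int_word(uint32_t int_word)
{
	return (int_word & 0x38000000) >> 27;	/* 3-bit ISC field */
}
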
+
+#define SCCB_MASK 0xFFFFFFF8
+#define SCCB_EVENT_PENDING 0x3
+
+static int __inject_service(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
+ /*
+ * Early versions of the QEMU s390 BIOS will inject several
+ * service interrupts, one after another, without handling the
+ * condition code that indicates busy.
+ * We silently ignore those superfluous SCCB values.
+ * A future version of QEMU will take care of serializing
+ * servc requests.
+ */
+ if (fi->srv_signal.ext_params & SCCB_MASK)
+ goto out;
+ fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
+ set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
+out:
+ spin_unlock(&fi->lock);
+ kfree(inti);
+ return 0;
+}
+
+static int __inject_virtio(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
+ }
+ fi->counters[FIRQ_CNTR_VIRTIO] += 1;
+ list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
+ set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return 0;
+}
+
+static int __inject_pfault_done(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ if (fi->counters[FIRQ_CNTR_PFAULT] >=
+ (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
+ }
+ fi->counters[FIRQ_CNTR_PFAULT] += 1;
+ list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
+ set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return 0;
+}
+
+#define CR_PENDING_SUBCLASS 28
+static int __inject_float_mchk(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
+ fi->mchk.mcic |= inti->mchk.mcic;
+ set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ kfree(inti);
+ return 0;
+}
+
+static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *inti, *iter;
+ struct list_head *list;
+ int isc;
- if ((!schid && !cr6) || (schid && cr6))
- return NULL;
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
- inti = NULL;
- list_for_each_entry(iter, &fi->list, list) {
- if (!is_ioint(iter->type))
- continue;
- if (cr6 &&
- ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
- continue;
- if (schid) {
- if (((schid & 0x00000000ffff0000) >> 16) !=
- iter->io.subchannel_id)
- continue;
- if ((schid & 0x000000000000ffff) !=
- iter->io.subchannel_nr)
- continue;
- }
- inti = iter;
- break;
- }
- if (inti) {
- list_del_init(&inti->list);
- fi->irq_count--;
+ if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
}
- if (list_empty(&fi->list))
- atomic_set(&fi->active, 0);
+ fi->counters[FIRQ_CNTR_IO] += 1;
+
+ isc = int_word_to_isc(inti->io.io_int_word);
+ list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
+ list_add_tail(&inti->list, list);
+ set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
spin_unlock(&fi->lock);
- return inti;
+ return 0;
}
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_local_interrupt *li;
struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *iter;
struct kvm_vcpu *dst_vcpu = NULL;
int sigcpu;
- int rc = 0;
+ u64 type = READ_ONCE(inti->type);
+ int rc;
fi = &kvm->arch.float_int;
- spin_lock(&fi->lock);
- if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
+
+ switch (type) {
+ case KVM_S390_MCHK:
+ rc = __inject_float_mchk(kvm, inti);
+ break;
+ case KVM_S390_INT_VIRTIO:
+ rc = __inject_virtio(kvm, inti);
+ break;
+ case KVM_S390_INT_SERVICE:
+ rc = __inject_service(kvm, inti);
+ break;
+ case KVM_S390_INT_PFAULT_DONE:
+ rc = __inject_pfault_done(kvm, inti);
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ rc = __inject_io(kvm, inti);
+ break;
+ default:
rc = -EINVAL;
- goto unlock_fi;
}
- fi->irq_count++;
- if (!is_ioint(inti->type)) {
- list_add_tail(&inti->list, &fi->list);
- } else {
- u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
+ if (rc)
+ return rc;
- /* Keep I/O interrupts sorted in isc order. */
- list_for_each_entry(iter, &fi->list, list) {
- if (!is_ioint(iter->type))
- continue;
- if (int_word_to_isc_bits(iter->io.io_int_word)
- <= isc_bits)
- continue;
- break;
- }
- list_add_tail(&inti->list, &iter->list);
- }
- atomic_set(&fi->active, 1);
- if (atomic_read(&kvm->online_vcpus) == 0)
- goto unlock_fi;
sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
if (sigcpu == KVM_MAX_VCPUS) {
do {
@@ -1257,7 +1384,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
li = &dst_vcpu->arch.local_int;
spin_lock(&li->lock);
- switch (inti->type) {
+ switch (type) {
case KVM_S390_MCHK:
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
break;
@@ -1270,9 +1397,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
}
spin_unlock(&li->lock);
kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
-unlock_fi:
- spin_unlock(&fi->lock);
- return rc;
+ return 0;
+
}
int kvm_s390_inject_vm(struct kvm *kvm,
@@ -1332,10 +1458,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
return rc;
}
-void kvm_s390_reinject_io_int(struct kvm *kvm,
+int kvm_s390_reinject_io_int(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
- __inject_vm(kvm, inti);
+ return __inject_vm(kvm, inti);
}
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
@@ -1388,12 +1514,10 @@ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
spin_unlock(&li->lock);
}
-int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
- struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc;
- spin_lock(&li->lock);
switch (irq->type) {
case KVM_S390_PROGRAM_INT:
VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
@@ -1433,83 +1557,130 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
default:
rc = -EINVAL;
}
+
+ return rc;
+}
+
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+
+ spin_lock(&li->lock);
+ rc = do_inject_vcpu(vcpu, irq);
spin_unlock(&li->lock);
if (!rc)
kvm_s390_vcpu_wakeup(vcpu);
return rc;
}
-void kvm_s390_clear_float_irqs(struct kvm *kvm)
+static inline void clear_irq_list(struct list_head *_list)
{
- struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *n, *inti = NULL;
+ struct kvm_s390_interrupt_info *inti, *n;
- fi = &kvm->arch.float_int;
- spin_lock(&fi->lock);
- list_for_each_entry_safe(inti, n, &fi->list, list) {
+ list_for_each_entry_safe(inti, n, _list, list) {
list_del(&inti->list);
kfree(inti);
}
- fi->irq_count = 0;
- atomic_set(&fi->active, 0);
- spin_unlock(&fi->lock);
}
-static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
- u8 *addr)
+static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
+ struct kvm_s390_irq *irq)
{
- struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
- struct kvm_s390_irq irq = {0};
-
- irq.type = inti->type;
+ irq->type = inti->type;
switch (inti->type) {
case KVM_S390_INT_PFAULT_INIT:
case KVM_S390_INT_PFAULT_DONE:
case KVM_S390_INT_VIRTIO:
- case KVM_S390_INT_SERVICE:
- irq.u.ext = inti->ext;
+ irq->u.ext = inti->ext;
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- irq.u.io = inti->io;
+ irq->u.io = inti->io;
break;
- case KVM_S390_MCHK:
- irq.u.mchk = inti->mchk;
- break;
- default:
- return -EINVAL;
}
+}
- if (copy_to_user(uptr, &irq, sizeof(irq)))
- return -EFAULT;
+void kvm_s390_clear_float_irqs(struct kvm *kvm)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ int i;
- return 0;
-}
+ spin_lock(&fi->lock);
+ for (i = 0; i < FIRQ_LIST_COUNT; i++)
+ clear_irq_list(&fi->lists[i]);
+ for (i = 0; i < FIRQ_MAX_COUNT; i++)
+ fi->counters[i] = 0;
+ spin_unlock(&fi->lock);
+}
-static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
+static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
struct kvm_s390_interrupt_info *inti;
struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_irq *buf;
+ struct kvm_s390_irq *irq;
+ int max_irqs;
int ret = 0;
int n = 0;
+ int i;
+
+ if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
+ return -EINVAL;
+
+ /*
+ * We are already using -ENOMEM to signal
+ * userspace that it may retry with a bigger buffer,
+ * so we need to use something else for this case.
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return -ENOBUFS;
+
+ max_irqs = len / sizeof(struct kvm_s390_irq);
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
-
- list_for_each_entry(inti, &fi->list, list) {
- if (len < sizeof(struct kvm_s390_irq)) {
+ for (i = 0; i < FIRQ_LIST_COUNT; i++) {
+ list_for_each_entry(inti, &fi->lists[i], list) {
+ if (n == max_irqs) {
+ /* signal userspace to try again */
+ ret = -ENOMEM;
+ goto out;
+ }
+ inti_to_irq(inti, &buf[n]);
+ n++;
+ }
+ }
+ if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
+ if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
- break;
+ goto out;
}
- ret = copy_irq_to_user(inti, buf);
- if (ret)
- break;
- buf += sizeof(struct kvm_s390_irq);
- len -= sizeof(struct kvm_s390_irq);
+ irq = (struct kvm_s390_irq *) &buf[n];
+ irq->type = KVM_S390_INT_SERVICE;
+ irq->u.ext = fi->srv_signal;
n++;
}
+ if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
+ if (n == max_irqs) {
+ /* signal userspace to try again */
+ ret = -ENOMEM;
+ goto out;
+ }
+ irq = (struct kvm_s390_irq *) &buf[n];
+ irq->type = KVM_S390_MCHK;
+ irq->u.mchk = fi->mchk;
+ n++;
+}
+out:
spin_unlock(&fi->lock);
+ if (!ret && n > 0) {
+ if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
+ ret = -EFAULT;
+ }
+ vfree(buf);
return ret < 0 ? ret : n;
}
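
Given the retry convention above, userspace grows its buffer on -ENOMEM and otherwise treats a nonnegative return as the number of interrupts copied. A hypothetical caller driving KVM_DEV_FLIC_GET_ALL_IRQS that way (flic_fd is an assumed, already-created FLIC device fd; the return-value interpretation follows the code above):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int fetch_all_float_irqs(int flic_fd, struct kvm_s390_irq **out, int *n)
{
	uint64_t len = 64 * sizeof(struct kvm_s390_irq);
	struct kvm_device_attr attr = { .group = KVM_DEV_FLIC_GET_ALL_IRQS };
	int r;

	for (;;) {
		struct kvm_s390_irq *buf = realloc(*out, len);

		if (!buf)
			return -ENOMEM;
		*out = buf;
		attr.addr = (uint64_t)(unsigned long)buf;
		attr.attr = len;
		r = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
		if (r >= 0) {
			*n = r;		/* number of interrupts stored */
			return 0;
		}
		if (errno != ENOMEM)	/* ENOMEM means: retry with more */
			return -errno;
		len *= 2;
	}
}
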
@@ -1520,7 +1691,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
switch (attr->group) {
case KVM_DEV_FLIC_GET_ALL_IRQS:
- r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
+ r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
attr->attr);
break;
default:
@@ -1952,3 +2123,143 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
{
return -EINVAL;
}
+
+int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_irq *buf;
+ int r = 0;
+ int n;
+
+ buf = vmalloc(len);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user((void *) buf, irqstate, len)) {
+ r = -EFAULT;
+ goto out_free;
+ }
+
+ /*
+ * Don't allow setting the interrupt state
+ * when there are already interrupts pending
+ */
+ spin_lock(&li->lock);
+ if (li->pending_irqs) {
+ r = -EBUSY;
+ goto out_unlock;
+ }
+
+ for (n = 0; n < len / sizeof(*buf); n++) {
+ r = do_inject_vcpu(vcpu, &buf[n]);
+ if (r)
+ break;
+ }
+
+out_unlock:
+ spin_unlock(&li->lock);
+out_free:
+ vfree(buf);
+
+ return r;
+}
+
+static void store_local_irq(struct kvm_s390_local_interrupt *li,
+ struct kvm_s390_irq *irq,
+ unsigned long irq_type)
+{
+ switch (irq_type) {
+ case IRQ_PEND_MCHK_EX:
+ case IRQ_PEND_MCHK_REP:
+ irq->type = KVM_S390_MCHK;
+ irq->u.mchk = li->irq.mchk;
+ break;
+ case IRQ_PEND_PROG:
+ irq->type = KVM_S390_PROGRAM_INT;
+ irq->u.pgm = li->irq.pgm;
+ break;
+ case IRQ_PEND_PFAULT_INIT:
+ irq->type = KVM_S390_INT_PFAULT_INIT;
+ irq->u.ext = li->irq.ext;
+ break;
+ case IRQ_PEND_EXT_EXTERNAL:
+ irq->type = KVM_S390_INT_EXTERNAL_CALL;
+ irq->u.extcall = li->irq.extcall;
+ break;
+ case IRQ_PEND_EXT_CLOCK_COMP:
+ irq->type = KVM_S390_INT_CLOCK_COMP;
+ break;
+ case IRQ_PEND_EXT_CPU_TIMER:
+ irq->type = KVM_S390_INT_CPU_TIMER;
+ break;
+ case IRQ_PEND_SIGP_STOP:
+ irq->type = KVM_S390_SIGP_STOP;
+ irq->u.stop = li->irq.stop;
+ break;
+ case IRQ_PEND_RESTART:
+ irq->type = KVM_S390_RESTART;
+ break;
+ case IRQ_PEND_SET_PREFIX:
+ irq->type = KVM_S390_SIGP_SET_PREFIX;
+ irq->u.prefix = li->irq.prefix;
+ break;
+ }
+}
+
+int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
+{
+ uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+ unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ unsigned long pending_irqs;
+ struct kvm_s390_irq irq;
+ unsigned long irq_type;
+ int cpuaddr;
+ int n = 0;
+
+ spin_lock(&li->lock);
+ pending_irqs = li->pending_irqs;
+ memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
+ sizeof(sigp_emerg_pending));
+ spin_unlock(&li->lock);
+
+ for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
+ memset(&irq, 0, sizeof(irq));
+ if (irq_type == IRQ_PEND_EXT_EMERGENCY)
+ continue;
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+
+ if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
+ for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
+ memset(&irq, 0, sizeof(irq));
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ irq.type = KVM_S390_INT_EMERGENCY;
+ irq.u.emerg.code = cpuaddr;
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+ }
+
+ if ((sigp_ctrl & SIGP_CTRL_C) &&
+ (atomic_read(&vcpu->arch.sie_block->cpuflags) &
+ CPUSTAT_ECALL_PEND)) {
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ memset(&irq, 0, sizeof(irq));
+ irq.type = KVM_S390_INT_EXTERNAL_CALL;
+ irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+
+ return n;
+}
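
Together these two ioctls let userspace transplant a vcpu's complete local interrupt state, which is what migration needs. A hypothetical save/restore pair (the vcpu fds are assumed; on the GET side a nonnegative return is the number of bytes stored, per the code above, and -ENOBUFS asks for a larger buffer):

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MAX_IRQS 64	/* assumed bound, large enough for this sketch */

static int migrate_irq_state(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct kvm_s390_irq irqs[MAX_IRQS];
	struct kvm_s390_irq_state state = {
		.buf = (uint64_t)(unsigned long)irqs,
		.len = sizeof(irqs),
	};
	int n;

	n = ioctl(src_vcpu_fd, KVM_S390_GET_IRQ_STATE, &state);
	if (n < 0)
		return -errno;	/* -ENOBUFS: enlarge irqs[] and retry */

	state.len = n;	/* inject exactly the bytes we read back */
	return ioctl(dst_vcpu_fd, KVM_S390_SET_IRQ_STATE, &state);
}
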
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f6579cf..afa2bd7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -25,11 +25,13 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
+#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
+#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -38,6 +40,11 @@
#include "trace.h"
#include "trace-s390.h"
+#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
+#define LOCAL_IRQS 32
+#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
+ (KVM_MAX_VCPUS + LOCAL_IRQS))
+
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -87,6 +94,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
+ { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
@@ -101,8 +109,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
- 0xff82fffbf4fc2000UL,
- 0x005c000000000000UL,
+ 0xffe6fffbfcfdfc40UL,
+ 0x205c800000000000UL,
};
unsigned long kvm_s390_fac_list_mask_size(void)
@@ -165,16 +173,22 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ONE_REG:
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_S390_CSS_SUPPORT:
- case KVM_CAP_IRQFD:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_ENABLE_CAP_VM:
case KVM_CAP_S390_IRQCHIP:
case KVM_CAP_VM_ATTRIBUTES:
case KVM_CAP_MP_STATE:
+ case KVM_CAP_S390_INJECT_IRQ:
case KVM_CAP_S390_USER_SIGP:
+ case KVM_CAP_S390_USER_STSI:
+ case KVM_CAP_S390_SKEYS:
+ case KVM_CAP_S390_IRQ_STATE:
r = 1;
break;
+ case KVM_CAP_S390_MEM_OP:
+ r = MEM_OP_MAX_SIZE;
+ break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
@@ -185,6 +199,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_COW:
r = MACHINE_HAS_ESOP;
break;
+ case KVM_CAP_S390_VECTOR_REGISTERS:
+ r = MACHINE_HAS_VX;
+ break;
default:
r = 0;
}
@@ -265,6 +282,18 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
kvm->arch.user_sigp = 1;
r = 0;
break;
+ case KVM_CAP_S390_VECTOR_REGISTERS:
+ if (MACHINE_HAS_VX) {
+ set_kvm_facility(kvm->arch.model.fac->mask, 129);
+ set_kvm_facility(kvm->arch.model.fac->list, 129);
+ r = 0;
+ } else
+ r = -EINVAL;
+ break;
+ case KVM_CAP_S390_USER_STSI:
+ kvm->arch.user_stsi = 1;
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
@@ -709,6 +738,108 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
return ret;
}
+static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
+{
+ uint8_t *keys;
+ uint64_t hva;
+ unsigned long curkey;
+ int i, r = 0;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ /* Is this guest using storage keys? */
+ if (!mm_use_skey(current->mm))
+ return KVM_S390_GET_SKEYS_NONE;
+
+ /* Enforce sane limit on memory allocation */
+ if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
+ return -EINVAL;
+
+ keys = kmalloc_array(args->count, sizeof(uint8_t),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!keys)
+ keys = vmalloc(sizeof(uint8_t) * args->count);
+ if (!keys)
+ return -ENOMEM;
+
+ for (i = 0; i < args->count; i++) {
+ hva = gfn_to_hva(kvm, args->start_gfn + i);
+ if (kvm_is_error_hva(hva)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ curkey = get_guest_storage_key(current->mm, hva);
+ if (IS_ERR_VALUE(curkey)) {
+ r = curkey;
+ goto out;
+ }
+ keys[i] = curkey;
+ }
+
+ r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
+ sizeof(uint8_t) * args->count);
+ if (r)
+ r = -EFAULT;
+out:
+ kvfree(keys);
+ return r;
+}
+
+static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
+{
+ uint8_t *keys;
+ uint64_t hva;
+ int i, r = 0;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ /* Enforce sane limit on memory allocation */
+ if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
+ return -EINVAL;
+
+ keys = kmalloc_array(args->count, sizeof(uint8_t),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!keys)
+ keys = vmalloc(sizeof(uint8_t) * args->count);
+ if (!keys)
+ return -ENOMEM;
+
+ r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
+ sizeof(uint8_t) * args->count);
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ /* Enable storage key handling for the guest */
+ s390_enable_skey();
+
+ for (i = 0; i < args->count; i++) {
+ hva = gfn_to_hva(kvm, args->start_gfn + i);
+ if (kvm_is_error_hva(hva)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ /* Lowest order bit is reserved */
+ if (keys[i] & 0x01) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = set_guest_storage_key(current->mm, hva,
+ (unsigned long)keys[i], 0);
+ if (r)
+ goto out;
+ }
+out:
+ kvfree(keys);
+ return r;
+}
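
A hypothetical userspace reader for the new storage-key interface; a return of KVM_S390_GET_SKEYS_NONE (rather than 0) tells the caller the guest never enabled storage keys, so there is nothing to save (vm_fd is an assumed VM file descriptor):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_skeys(int vm_fd, uint64_t start_gfn, uint8_t *keys,
		      uint64_t count)
{
	struct kvm_s390_skeys args = {
		.start_gfn = start_gfn,
		.count = count,			/* at most KVM_S390_SKEYS_MAX */
		.skeydata_addr = (uint64_t)(unsigned long)keys,
	};

	return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
}
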
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -768,6 +899,26 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_s390_vm_has_attr(kvm, &attr);
break;
}
+ case KVM_S390_GET_SKEYS: {
+ struct kvm_s390_skeys args;
+
+ r = -EFAULT;
+ if (copy_from_user(&args, argp,
+ sizeof(struct kvm_s390_skeys)))
+ break;
+ r = kvm_s390_get_skeys(kvm, &args);
+ break;
+ }
+ case KVM_S390_SET_SKEYS: {
+ struct kvm_s390_skeys args;
+
+ r = -EFAULT;
+ if (copy_from_user(&args, argp,
+ sizeof(struct kvm_s390_skeys)))
+ break;
+ r = kvm_s390_set_skeys(kvm, &args);
+ break;
+ }
default:
r = -ENOTTY;
}
@@ -888,7 +1039,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
if (!kvm->arch.dbf)
- goto out_nodbf;
+ goto out_err;
/*
* The architectural maximum amount of facilities is 16 kbit. To store
@@ -900,7 +1051,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.fac =
(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!kvm->arch.model.fac)
- goto out_nofac;
+ goto out_err;
/* Populate the facility mask initially. */
memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
@@ -920,10 +1071,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
if (kvm_s390_crypto_init(kvm) < 0)
- goto out_crypto;
+ goto out_err;
spin_lock_init(&kvm->arch.float_int.lock);
- INIT_LIST_HEAD(&kvm->arch.float_int.list);
+ for (i = 0; i < FIRQ_LIST_COUNT; i++)
+ INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
init_waitqueue_head(&kvm->arch.ipte_wq);
mutex_init(&kvm->arch.ipte_mutex);
@@ -935,7 +1087,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
} else {
kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
if (!kvm->arch.gmap)
- goto out_nogmap;
+ goto out_err;
kvm->arch.gmap->private = kvm;
kvm->arch.gmap->pfault_enabled = 0;
}
@@ -947,15 +1099,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
spin_lock_init(&kvm->arch.start_stop_lock);
return 0;
-out_nogmap:
+out_err:
kfree(kvm->arch.crypto.crycb);
-out_crypto:
free_page((unsigned long)kvm->arch.model.fac);
-out_nofac:
debug_unregister(kvm->arch.dbf);
-out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
-out_err:
return rc;
}
@@ -1035,6 +1183,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
KVM_SYNC_CRS |
KVM_SYNC_ARCH0 |
KVM_SYNC_PFAULT;
+ if (test_kvm_facility(vcpu->kvm, 129))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
if (kvm_is_ucontrol(vcpu->kvm))
return __kvm_ucontrol_vcpu_init(vcpu);
@@ -1045,10 +1195,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
- save_fp_regs(vcpu->arch.host_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129))
+ save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+ else
+ save_fp_regs(vcpu->arch.host_fpregs.fprs);
save_access_regs(vcpu->arch.host_acrs);
- restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
- restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ restore_fp_ctl(&vcpu->run->s.regs.fpc);
+ restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+ } else {
+ restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ }
restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.gmap);
atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -1058,11 +1216,19 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
- save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
- save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ save_fp_ctl(&vcpu->run->s.regs.fpc);
+ save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+ } else {
+ save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ }
save_access_regs(vcpu->run->s.regs.acrs);
restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
- restore_fp_regs(vcpu->arch.host_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129))
+ restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+ else
+ restore_fp_regs(vcpu->arch.host_fpregs.fprs);
restore_access_regs(vcpu->arch.host_acrs);
}
@@ -1130,6 +1296,15 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
return 0;
}
+static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
+
+ vcpu->arch.cpu_id = model->cpu_id;
+ vcpu->arch.sie_block->ibc = model->ibc;
+ vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
+}
+
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int rc = 0;
@@ -1138,6 +1313,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
CPUSTAT_SM |
CPUSTAT_STOPPED |
CPUSTAT_GED);
+ kvm_s390_vcpu_setup_model(vcpu);
+
vcpu->arch.sie_block->ecb = 6;
if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
vcpu->arch.sie_block->ecb |= 0x10;
@@ -1148,8 +1325,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->eca |= 1;
if (sclp_has_sigpif())
vcpu->arch.sie_block->eca |= 0x10000000U;
- vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
- ICTL_TPROT;
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ vcpu->arch.sie_block->eca |= 0x00020000;
+ vcpu->arch.sie_block->ecd |= 0x20000000;
+ }
+ vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
if (kvm_s390_cmma_enabled(vcpu->kvm)) {
rc = kvm_s390_vcpu_setup_cmma(vcpu);
@@ -1159,11 +1339,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
- mutex_lock(&vcpu->kvm->lock);
- vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
- vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
- mutex_unlock(&vcpu->kvm->lock);
-
kvm_s390_vcpu_crypto_setup(vcpu);
return rc;
@@ -1191,6 +1366,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block = &sie_page->sie_block;
vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
+ vcpu->arch.host_vregs = &sie_page->vregs;
vcpu->arch.sie_block->icpua = id;
if (!kvm_is_ucontrol(kvm)) {
@@ -1206,7 +1382,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
}
- vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
@@ -1726,6 +1901,31 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
return 0;
}
+static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
+{
+ psw_t *psw = &vcpu->arch.sie_block->gpsw;
+ u8 opcode;
+ int rc;
+
+ VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+ trace_kvm_s390_sie_fault(vcpu);
+
+ /*
+ * We want to inject an addressing exception, which is defined as a
+ * suppressing or terminating exception. However, since we came here
+ * by a DAT access exception, the PSW still points to the faulting
+ * instruction since DAT exceptions are nullifying. So we've got
+ * to look up the current opcode to get the length of the instruction
+ * to be able to forward the PSW.
+ */
+ rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
+ psw->addr = __rewind_psw(*psw, -insn_length(opcode));
+
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+}
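
The opcode lookup works because s390 encodes the instruction length in the two most significant bits of the first opcode byte: 00 means 2 bytes, 01 and 10 mean 4, and 11 means 6. A sketch of the arithmetic I would expect insn_length() to use for that mapping:

static int insn_len_from_opcode(unsigned char opcode)
{
	/* 00xxxxxx -> 2, 01/10 -> 4, 11xxxxxx -> 6 */
	return ((((int)opcode + 64) >> 7) + 1) << 1;
}
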
+
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
int rc = -1;
@@ -1757,11 +1957,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
}
}
- if (rc == -1) {
- VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
- trace_kvm_s390_sie_fault(vcpu);
- rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- }
+ if (rc == -1)
+ rc = vcpu_post_run_fault_in_sie(vcpu);
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
@@ -1977,6 +2174,35 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return kvm_s390_store_status_unloaded(vcpu, addr);
}
+/*
+ * store additional status at address
+ */
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+ unsigned long gpa)
+{
+ /* Only bits 0-53 are used for address formation */
+ if (!(gpa & ~0x3ff))
+ return 0;
+
+ return write_guest_abs(vcpu, gpa & ~0x3ff,
+ (void *)&vcpu->run->s.regs.vrs, 512);
+}
+
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ if (!test_kvm_facility(vcpu->kvm, 129))
+ return 0;
+
+ /*
+ * The guest VXRS are in the host VXRS due to the lazy
+ * copying in vcpu load/put. Let's update our copies before we save
+ * them into the save area.
+ */
+ save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+
+ return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
+}
+
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
@@ -2101,6 +2327,65 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return r;
}
+static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
+ struct kvm_s390_mem_op *mop)
+{
+ void __user *uaddr = (void __user *)mop->buf;
+ void *tmpbuf = NULL;
+ int r, srcu_idx;
+ const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
+ | KVM_S390_MEMOP_F_CHECK_ONLY;
+
+ if (mop->flags & ~supported_flags)
+ return -EINVAL;
+
+ if (mop->size > MEM_OP_MAX_SIZE)
+ return -E2BIG;
+
+ if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
+ tmpbuf = vmalloc(mop->size);
+ if (!tmpbuf)
+ return -ENOMEM;
+ }
+
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ switch (mop->op) {
+ case KVM_S390_MEMOP_LOGICAL_READ:
+ if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
+ r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
+ break;
+ }
+ r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
+ if (r == 0) {
+ if (copy_to_user(uaddr, tmpbuf, mop->size))
+ r = -EFAULT;
+ }
+ break;
+ case KVM_S390_MEMOP_LOGICAL_WRITE:
+ if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
+ r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
+ break;
+ }
+ if (copy_from_user(tmpbuf, uaddr, mop->size)) {
+ r = -EFAULT;
+ break;
+ }
+ r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+
+ if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
+ kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
+
+ vfree(tmpbuf);
+ return r;
+}
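
A hypothetical caller of the new memory-operation ioctl, issuing a logical read through a given access register or, with KVM_S390_MEMOP_F_CHECK_ONLY, probing accessibility without copying (vcpu_fd is an assumed vcpu file descriptor):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int guest_read(int vcpu_fd, uint64_t gaddr, uint8_t ar,
		      void *buf, uint32_t size, int check_only)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.gaddr = gaddr;
	op.size = size;			/* capped by KVM_CAP_S390_MEM_OP */
	op.op = KVM_S390_MEMOP_LOGICAL_READ;
	op.buf = (uint64_t)(unsigned long)buf;
	op.ar = ar;
	if (check_only)
		op.flags = KVM_S390_MEMOP_F_CHECK_ONLY;

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}
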
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2110,6 +2395,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
long r;
switch (ioctl) {
+ case KVM_S390_IRQ: {
+ struct kvm_s390_irq s390irq;
+
+ r = -EFAULT;
+ if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
+ break;
+ r = kvm_s390_inject_vcpu(vcpu, &s390irq);
+ break;
+ }
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
struct kvm_s390_irq s390irq;
@@ -2200,6 +2494,47 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
+ case KVM_S390_MEM_OP: {
+ struct kvm_s390_mem_op mem_op;
+
+ if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
+ r = kvm_s390_guest_mem_op(vcpu, &mem_op);
+ else
+ r = -EFAULT;
+ break;
+ }
+ case KVM_S390_SET_IRQ_STATE: {
+ struct kvm_s390_irq_state irq_state;
+
+ r = -EFAULT;
+ if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
+ break;
+ if (irq_state.len > VCPU_IRQS_MAX_BUF ||
+ irq_state.len == 0 ||
+ irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
+ r = -EINVAL;
+ break;
+ }
+ r = kvm_s390_set_irq_state(vcpu,
+ (void __user *) irq_state.buf,
+ irq_state.len);
+ break;
+ }
+ case KVM_S390_GET_IRQ_STATE: {
+ struct kvm_s390_irq_state irq_state;
+
+ r = -EFAULT;
+ if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
+ break;
+ if (irq_state.len == 0) {
+ r = -EINVAL;
+ break;
+ }
+ r = kvm_s390_get_irq_state(vcpu,
+ (__u8 __user *) irq_state.buf,
+ irq_state.len);
+ break;
+ }
default:
r = -ENOTTY;
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c34109a..ca108b9 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -70,16 +70,22 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}
-static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
+typedef u8 __bitwise ar_t;
+
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
- u64 *address1, u64 *address2)
+ u64 *address1, u64 *address2,
+ ar_t *ar_b1, ar_t *ar_b2)
{
u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -88,6 +94,11 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+
+ if (ar_b1)
+ *ar_b1 = base1;
+ if (ar_b2)
+ *ar_b2 = base2;
}
static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
@@ -98,7 +109,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}
-static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -107,14 +118,20 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
if (disp2 & 0x80000)
disp2+=0xfff00000;
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}
-static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
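
These accessors all decode a base/displacement pair out of the SIE instruction parameter block (the RSY variant additionally sign-extends a 20-bit displacement), and the new ar_t out-parameter simply reuses the base register number as the access register number. A sketch of the common S-format decode:

#include <stdint.h>

static void decode_base_disp_s(uint32_t ipb, unsigned int *base2,
			       uint32_t *disp2)
{
	*base2 = ipb >> 28;			/* B2 field */
	*disp2 = (ipb & 0x0fff0000) >> 16;	/* D2 field */
}
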
@@ -125,13 +142,24 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}
-/* test availability of facility in a kvm intance */
+/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
return __test_facility(nr, kvm->arch.model.fac->mask) &&
__test_facility(nr, kvm->arch.model.fac->list);
}
+static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
+{
+ unsigned char *ptr;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return -EINVAL;
+ ptr = (unsigned char *) fac_list + (nr >> 3);
+ *ptr |= (0x80UL >> (nr & 7));
+ return 0;
+}
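
The helper addresses the facility list msb-first: facility nr lives in byte nr / 8 under mask 0x80 >> (nr % 8). A quick self-check for facility 129, the vector facility enabled via KVM_CAP_S390_VECTOR_REGISTERS:

#include <assert.h>

int main(void)
{
	unsigned long nr = 129;

	assert((nr >> 3) == 16);		/* byte index in the list */
	assert((0x80u >> (nr & 7)) == 0x40);	/* bit mask within that byte */
	return 0;
}
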
+
/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
@@ -150,9 +178,9 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
struct kvm_s390_irq *irq);
int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
- u64 cr6, u64 schid);
-void kvm_s390_reinject_io_int(struct kvm *kvm,
- struct kvm_s390_interrupt_info *inti);
+ u64 isc_mask, u32 schid);
+int kvm_s390_reinject_io_int(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
/* implemented in intercept.c */
@@ -177,7 +205,10 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
/* implemented in kvm-s390.c */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+ unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void s390_vcpu_block(struct kvm_vcpu *vcpu);
@@ -241,6 +272,10 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
+int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
+ void __user *buf, int len);
+int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
+ __u8 __user *buf, int len);
/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 3511169..d22d8ee 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -36,15 +36,16 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
struct kvm_vcpu *cpup;
s64 hostclk, val;
int i, rc;
+ ar_t ar;
u64 op2;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- op2 = kvm_s390_get_base_disp_s(vcpu);
+ op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (op2 & 7) /* Operand must be on a doubleword boundary */
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, op2, &val, sizeof(val));
+ rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -68,20 +69,21 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
+ ar_t ar;
vcpu->stat.instruction_spx++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
/* must be word boundary */
if (operand2 & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* get the value */
- rc = read_guest(vcpu, operand2, &address, sizeof(address));
+ rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -107,13 +109,14 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stpx++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
/* must be word boundary */
if (operand2 & 3)
@@ -122,7 +125,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
address = kvm_s390_get_prefix(vcpu);
/* get the value */
- rc = write_guest(vcpu, operand2, &address, sizeof(address));
+ rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -136,18 +139,19 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
u16 vcpu_id = vcpu->vcpu_id;
u64 ga;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stap++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_s(vcpu);
+ ga = kvm_s390_get_base_disp_s(vcpu, &ar);
if (ga & 1)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
+ rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -207,7 +211,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
addr = kvm_s390_logical_to_effective(vcpu, addr);
- if (kvm_s390_check_low_addr_protection(vcpu, addr))
+ if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
addr = kvm_s390_real_to_abs(vcpu, addr);
@@ -229,18 +233,20 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
struct kvm_s390_interrupt_info *inti;
unsigned long len;
u32 tpi_data[3];
- int cc, rc;
+ int rc;
u64 addr;
+ ar_t ar;
- rc = 0;
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- cc = 0;
+
inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
- if (!inti)
- goto no_interrupt;
- cc = 1;
+ if (!inti) {
+ kvm_s390_set_psw_cc(vcpu, 0);
+ return 0;
+ }
+
tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
tpi_data[1] = inti->io.io_int_parm;
tpi_data[2] = inti->io.io_int_word;
@@ -250,40 +256,51 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
* provided area.
*/
len = sizeof(tpi_data) - 4;
- rc = write_guest(vcpu, addr, &tpi_data, len);
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
+ rc = write_guest(vcpu, addr, ar, &tpi_data, len);
+ if (rc) {
+ rc = kvm_s390_inject_prog_cond(vcpu, rc);
+ goto reinject_interrupt;
+ }
} else {
/*
* Store the three-word I/O interruption code into
* the appropriate lowcore area.
*/
len = sizeof(tpi_data);
- if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
+ if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
+ /* failed writes to the low core are not recoverable */
rc = -EFAULT;
+ goto reinject_interrupt;
+ }
}
+
+ /* irq was successfully handed to the guest */
+ kfree(inti);
+ kvm_s390_set_psw_cc(vcpu, 1);
+ return 0;
+reinject_interrupt:
/*
* If we encounter a problem storing the interruption code, the
* instruction is suppressed from the guest's view: reinject the
* interrupt.
*/
- if (!rc)
+ if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
kfree(inti);
- else
- kvm_s390_reinject_io_int(vcpu->kvm, inti);
-no_interrupt:
- /* Set condition code and we're done. */
- if (!rc)
- kvm_s390_set_psw_cc(vcpu, cc);
+ rc = -EFAULT;
+ }
+ /* don't set the cc, a pgm irq was injected or we drop to user space */
return rc ? -EFAULT : 0;
}
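
The rework above replaces the cc/rc juggling with an explicit ownership rule: the dequeued interrupt is freed only once the interruption code has actually reached the guest; on any store failure it is reinjected, and only a failed reinjection forces the -EFAULT exit to user space. A standalone sketch of that flow, with hypothetical dequeue_irq()/store_irq()/reinject_irq() helpers standing in for the kvm_s390_* calls:

    #include <stdlib.h>

    struct irq { int data; };

    /* hypothetical stand-ins for kvm_s390_get_io_int() and friends */
    static struct irq *dequeue_irq(void) { return malloc(sizeof(struct irq)); }
    static int store_irq(struct irq *irq) { (void)irq; return 0; }    /* 0 on success */
    static int reinject_irq(struct irq *irq) { (void)irq; return 0; } /* 0 on success */
    static void set_cc(int cc) { (void)cc; }

    static int deliver_pending(void)
    {
            struct irq *irq = dequeue_irq();

            if (!irq) {
                    set_cc(0);              /* nothing pending */
                    return 0;
            }
            if (store_irq(irq) == 0) {
                    free(irq);              /* guest owns the irq now */
                    set_cc(1);              /* interruption code stored */
                    return 0;
            }
            /* store failed: the instruction is suppressed, so hand the
             * irq back; if even that fails, drop it and report the
             * error to user space (no cc update in either case) */
            if (reinject_irq(irq))
                    free(irq);
            return -1;
    }
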
static int handle_tsch(struct kvm_vcpu *vcpu)
{
- struct kvm_s390_interrupt_info *inti;
+ struct kvm_s390_interrupt_info *inti = NULL;
+ const u64 isc_mask = 0xffUL << 24; /* all iscs set */
- inti = kvm_s390_get_io_int(vcpu->kvm, 0,
- vcpu->run->s.regs.gprs[1]);
+ /* a valid schid has at least one bit set */
+ if (vcpu->run->s.regs.gprs[1])
+ inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
+ vcpu->run->s.regs.gprs[1]);
/*
* Prepare exit to userspace.
@@ -386,15 +403,16 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
psw_compat_t new_psw;
u64 addr;
int rc;
+ ar_t ar;
if (gpsw->mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+ rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (!(new_psw.mask & PSW32_MASK_BASE))
@@ -412,14 +430,15 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
psw_t new_psw;
u64 addr;
int rc;
+ ar_t ar;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+ rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu->arch.sie_block->gpsw = new_psw;
@@ -433,18 +452,19 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
u64 stidp_data = vcpu->arch.stidp_data;
u64 operand2;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stidp++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (operand2 & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
+ rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -467,6 +487,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
for (n = mem->count - 1; n > 0 ; n--)
memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
mem->vm[0].cpus_total = cpus;
mem->vm[0].cpus_configured = cpus;
mem->vm[0].cpus_standby = 0;
@@ -478,6 +499,17 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
ASCEBC(mem->vm[0].cpi, 16);
}
+static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
+ u8 fc, u8 sel1, u16 sel2)
+{
+ vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
+ vcpu->run->s390_stsi.addr = addr;
+ vcpu->run->s390_stsi.ar = ar;
+ vcpu->run->s390_stsi.fc = fc;
+ vcpu->run->s390_stsi.sel1 = sel1;
+ vcpu->run->s390_stsi.sel2 = sel2;
+}
+
static int handle_stsi(struct kvm_vcpu *vcpu)
{
int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
@@ -486,6 +518,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
unsigned long mem = 0;
u64 operand2;
int rc = 0;
+ ar_t ar;
vcpu->stat.instruction_stsi++;
VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
@@ -508,7 +541,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
return 0;
}
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (operand2 & 0xfff)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -532,16 +565,20 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
break;
}
- rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
+ rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
if (rc) {
rc = kvm_s390_inject_prog_cond(vcpu, rc);
goto out;
}
+ if (vcpu->kvm->arch.user_stsi) {
+ insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
+ rc = -EREMOTE;
+ }
trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
free_page(mem);
kvm_s390_set_psw_cc(vcpu, 0);
vcpu->run->s.regs.gprs[0] = 0;
- return 0;
+ return rc;
out_no_data:
kvm_s390_set_psw_cc(vcpu, 3);
out:
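
The user_stsi branch introduces a new exit: when enabled, handle_stsi() still fills the guest page, then returns -EREMOTE with the s390_stsi block of struct kvm_run populated, so user space can post-process the data. A hedged sketch of the consumer side (the dispatch is schematic; only the s390_stsi field names come from insert_stsi_usr_data() above):

    #include <linux/kvm.h>

    /* called after KVM_RUN returns; schematic */
    static void handle_s390_stsi_exit(struct kvm_run *run)
    {
            if (run->exit_reason != KVM_EXIT_S390_STSI)
                    return;
            /* fc/sel1/sel2 identify the STSI function code and selectors,
             * addr/ar the guest buffer the kernel already filled */
            if (run->s390_stsi.fc == 3 && run->s390_stsi.sel1 == 2 &&
                run->s390_stsi.sel2 == 2) {
                    /* e.g. amend the level-3 (VM) sysinfo block at
                     * run->s390_stsi.addr in guest memory */
            }
    }
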
@@ -670,7 +707,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
- if (kvm_s390_check_low_addr_protection(vcpu, start))
+ if (kvm_s390_check_low_addr_prot_real(vcpu, start))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}
@@ -776,13 +813,14 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_lctl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rs(vcpu);
+ ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -791,7 +829,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
- rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
@@ -814,13 +852,14 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_stctl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rs(vcpu);
+ ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -836,7 +875,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
break;
reg = (reg + 1) % 16;
} while (1);
- rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
@@ -847,13 +886,14 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_lctlg++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rsy(vcpu);
+ ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -862,7 +902,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
- rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
@@ -884,13 +924,14 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_stctg++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rsy(vcpu);
+ ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -906,7 +947,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
break;
reg = (reg + 1) % 16;
} while (1);
- rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
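
All four LCTL/STCTL/LCTLG/STCTG handlers compute the register count as ((reg3 - reg1) & 0xf) + 1 and step with (reg + 1) % 16, so a range like 14..1 wraps around correctly. A quick standalone check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
            int reg1 = 14, reg3 = 1;
            int nr_regs = ((reg3 - reg1) & 0xf) + 1;

            assert(nr_regs == 4);   /* registers 14, 15, 0, 1 */
            return 0;
    }
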
@@ -931,13 +972,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
unsigned long hva, gpa;
int ret = 0, cc = 0;
bool writable;
+ ar_t ar;
vcpu->stat.instruction_tprot++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
+ kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);
/* we only handle the Linux memory detection case:
* access key == 0
@@ -946,11 +988,11 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
ipte_lock(vcpu);
- ret = guest_translate_address(vcpu, address1, &gpa, 1);
+ ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
if (ret == PGM_PROTECTION) {
/* Write protected? Try again with read-only... */
cc = 1;
- ret = guest_translate_address(vcpu, address1, &gpa, 0);
+ ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
}
if (ret) {
if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 23b1e86..72e58bd 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -393,6 +393,9 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
case SIGP_STORE_STATUS_AT_ADDRESS:
vcpu->stat.instruction_sigp_store_status++;
break;
+ case SIGP_STORE_ADDITIONAL_STATUS:
+ vcpu->stat.instruction_sigp_store_adtl_status++;
+ break;
case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++;
break;
@@ -431,7 +434,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- order_code = kvm_s390_get_base_disp_rs(vcpu);
+ order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
if (handle_sigp_order_in_user_space(vcpu, order_code))
return -EOPNOTSUPP;
@@ -473,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
int r3 = vcpu->arch.sie_block->ipa & 0x000f;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
struct kvm_vcpu *dest_vcpu;
- u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
+ u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 653a7ec..3208d33 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -10,6 +10,13 @@
#define TRACE_INCLUDE_FILE trace-s390
/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR kvm_s390
+
+/*
* Trace point for the creation of the kvm instance.
*/
TRACE_EVENT(kvm_s390_create_vm,
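
Context for the TRACE_SYSTEM_VAR hunk: this header's TRACE_SYSTEM is (presumably, as in mainline) "kvm-s390", and a hyphen cannot appear in a C identifier, so the tracing machinery needs an identifier-safe override:

    /* illustrative; the TRACE_SYSTEM define itself is outside this hunk */
    #define TRACE_SYSTEM kvm-s390          /* name shown to user space     */
    #define TRACE_SYSTEM_VAR kvm_s390      /* must be a valid C identifier */
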
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index a01df23..0e8fefe 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -3,8 +3,7 @@
#
lib-y += delay.o string.o uaccess.o find.o
-obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
-obj-$(CONFIG_64BIT) += mem64.o
+obj-y += mem.o
lib-$(CONFIG_SMP) += spinlock.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
deleted file mode 100644
index 261152f..0000000
--- a/arch/s390/lib/div64.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * __div64_32 implementation for 31 bit.
- *
- * Copyright IBM Corp. 2006
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-
-#ifdef CONFIG_MARCH_G5
-
-/*
- * Function to divide an unsigned 64 bit integer by an unsigned
- * 31 bit integer using signed 64/32 bit division.
- */
-static uint32_t __div64_31(uint64_t *n, uint32_t base)
-{
- register uint32_t reg2 asm("2");
- register uint32_t reg3 asm("3");
- uint32_t *words = (uint32_t *) n;
- uint32_t tmp;
-
- /* Special case base==1, remainder = 0, quotient = n */
- if (base == 1)
- return 0;
- /*
- * Special case base==0 will cause a fixed point divide exception
- * on the dr instruction and may not happen anyway. For the
- * following calculation we can assume base > 1. The first
- * signed 64 / 32 bit division with an upper half of 0 will
- * give the correct upper half of the 64 bit quotient.
- */
- reg2 = 0UL;
- reg3 = words[0];
- asm volatile(
- " dr %0,%2\n"
- : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
- words[0] = reg3;
- reg3 = words[1];
- /*
- * To get the lower half of the 64 bit quotient and the 32 bit
- * remainder we have to use a little trick. Since we only have
- * a signed division the quotient can get too big. To avoid this
- * the 64 bit dividend is halved, then the signed division will
- * work. Afterwards the quotient and the remainder are doubled.
- * If the last bit of the dividend has been one the remainder
- * is increased by one then checked against the base. If the
- * remainder has overflown subtract base and increase the
- * quotient. Simple, no ?
- */
- asm volatile(
- " nr %2,%1\n"
- " srdl %0,1\n"
- " dr %0,%3\n"
- " alr %0,%0\n"
- " alr %1,%1\n"
- " alr %0,%2\n"
- " clr %0,%3\n"
- " jl 0f\n"
- " slr %0,%3\n"
- " ahi %1,1\n"
- "0:\n"
- : "+d" (reg2), "+d" (reg3), "=d" (tmp)
- : "d" (base), "2" (1UL) : "cc" );
- words[1] = reg3;
- return reg2;
-}
-
-/*
- * Function to divide an unsigned 64 bit integer by an unsigned
- * 32 bit integer using the unsigned 64/31 bit division.
- */
-uint32_t __div64_32(uint64_t *n, uint32_t base)
-{
- uint32_t r;
-
- /*
- * If the most significant bit of base is set, divide n by
- * (base/2). That allows using 64/31 bit division and gives a
- * good approximation of the result: n = (base/2)*q + r. The
- * result needs to be corrected with two simple transformations.
- * If base is already < 2^31-1 __div64_31 can be used directly.
- */
- r = __div64_31(n, ((signed) base < 0) ? (base/2) : base);
- if ((signed) base < 0) {
- uint64_t q = *n;
- /*
- * First transformation:
- * n = (base/2)*q + r
- * = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r
- * Since r < (base/2), r + (base/2) < base.
- * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0)
- * n = ((base/2)*2)*q1 + r1 with r1 < base.
- */
- if (q & 1)
- r += base/2;
- q >>= 1;
- /*
- * Second transformation. ((base/2)*2) could have lost the
- * last bit.
- * n = ((base/2)*2)*q1 + r1
- * = base*q1 - ((base&1) ? q1 : 0) + r1
- */
- if (base & 1) {
- int64_t rx = r - q;
- /*
- * base is >= 2^31. The worst case for the while
- * loop is n=2^64-1 base=2^31+1. That gives a
- * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since
- * base >= 2^31 the loop is finished after a maximum
- * of three iterations.
- */
- while (rx < 0) {
- rx += base;
- q--;
- }
- r = rx;
- }
- *n = q;
- }
- return r;
-}
-
-#else /* MARCH_G5 */
-
-uint32_t __div64_32(uint64_t *n, uint32_t base)
-{
- register uint32_t reg2 asm("2");
- register uint32_t reg3 asm("3");
- uint32_t *words = (uint32_t *) n;
-
- reg2 = 0UL;
- reg3 = words[0];
- asm volatile(
- " dlr %0,%2\n"
- : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
- words[0] = reg3;
- reg3 = words[1];
- asm volatile(
- " dlr %0,%2\n"
- : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
- words[1] = reg3;
- return reg2;
-}
-
-#endif /* MARCH_G5 */
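
The two "transformations" the deleted comments walk through are easy to check with plain 64-bit arithmetic. A standalone sketch (illustrative only; the deleted code had to build this from signed 32-bit division, which is exactly what the halving trick works around):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t div64_32_by_halving(uint64_t *n, uint32_t base)
    {
            uint64_t q;
            int64_t r;

            if (!(base & 0x80000000u)) {    /* base < 2^31: divide directly */
                    r = *n % base;
                    *n /= base;
                    return (uint32_t)r;
            }
            q = *n / (base / 2);            /* n = (base/2)*q + r */
            r = *n % (base / 2);
            if (q & 1)                      /* first transformation */
                    r += base / 2;
            q >>= 1;                        /* n = ((base/2)*2)*q + r, r < base */
            if (base & 1) {                 /* second transformation */
                    r -= (int64_t)q;        /* ((base/2)*2) == base - 1 here */
                    while (r < 0) {         /* the deleted analysis bounds this
                                             * at three iterations */
                            r += base;
                            q--;
                    }
            }
            *n = q;
            return (uint32_t)r;
    }

    int main(void)
    {
            uint64_t n = 0xffffffffffffffffULL;  /* worst case from the comment */
            uint32_t r = div64_32_by_halving(&n, 0x80000001u);

            assert(n * 0x80000001ULL + r == 0xffffffffffffffffULL);
            return 0;
    }
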
diff --git a/arch/s390/lib/mem64.S b/arch/s390/lib/mem.S
index c6d553e..c6d553e 100644
--- a/arch/s390/lib/mem64.S
+++ b/arch/s390/lib/mem.S
diff --git a/arch/s390/lib/mem32.S b/arch/s390/lib/mem32.S
deleted file mode 100644
index 14ca924..0000000
--- a/arch/s390/lib/mem32.S
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * String handling functions.
- *
- * Copyright IBM Corp. 2012
- */
-
-#include <linux/linkage.h>
-
-/*
- * memset implementation
- *
- * This code corresponds to the C construct below. We do distinguish
- * between clearing (c == 0) and setting a memory array (c != 0) simply
- * because nearly all memset invocations in the kernel clear memory and
- * the xc instruction is preferred in such cases.
- *
- * void *memset(void *s, int c, size_t n)
- * {
- * if (likely(c == 0))
- * return __builtin_memset(s, 0, n);
- * return __builtin_memset(s, c, n);
- * }
- */
-ENTRY(memset)
- basr %r5,%r0
-.Lmemset_base:
- ltr %r4,%r4
- bzr %r14
- ltr %r3,%r3
- jnz .Lmemset_fill
- ahi %r4,-1
- lr %r3,%r4
- srl %r3,8
- ltr %r3,%r3
- lr %r1,%r2
- je .Lmemset_clear_rest
-.Lmemset_clear_loop:
- xc 0(256,%r1),0(%r1)
- la %r1,256(%r1)
- brct %r3,.Lmemset_clear_loop
-.Lmemset_clear_rest:
- ex %r4,.Lmemset_xc-.Lmemset_base(%r5)
- br %r14
-.Lmemset_fill:
- stc %r3,0(%r2)
- chi %r4,1
- lr %r1,%r2
- ber %r14
- ahi %r4,-2
- lr %r3,%r4
- srl %r3,8
- ltr %r3,%r3
- je .Lmemset_fill_rest
-.Lmemset_fill_loop:
- mvc 1(256,%r1),0(%r1)
- la %r1,256(%r1)
- brct %r3,.Lmemset_fill_loop
-.Lmemset_fill_rest:
- ex %r4,.Lmemset_mvc-.Lmemset_base(%r5)
- br %r14
-.Lmemset_xc:
- xc 0(1,%r1),0(%r1)
-.Lmemset_mvc:
- mvc 1(1,%r1),0(%r1)
-
-/*
- * memcpy implementation
- *
- * void *memcpy(void *dest, const void *src, size_t n)
- */
-ENTRY(memcpy)
- basr %r5,%r0
-.Lmemcpy_base:
- ltr %r4,%r4
- bzr %r14
- ahi %r4,-1
- lr %r0,%r4
- srl %r0,8
- ltr %r0,%r0
- lr %r1,%r2
- jnz .Lmemcpy_loop
-.Lmemcpy_rest:
- ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5)
- br %r14
-.Lmemcpy_loop:
- mvc 0(256,%r1),0(%r3)
- la %r1,256(%r1)
- la %r3,256(%r3)
- brct %r0,.Lmemcpy_loop
- j .Lmemcpy_rest
-.Lmemcpy_mvc:
- mvc 0(1,%r1),0(%r3)
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
deleted file mode 100644
index d321329..0000000
--- a/arch/s390/lib/qrnnd.S
+++ /dev/null
@@ -1,78 +0,0 @@
-# S/390 __udiv_qrnnd
-
-#include <linux/linkage.h>
-
-# r2 : &__r
-# r3 : upper half of 64 bit word n
-# r4 : lower half of 64 bit word n
-# r5 : divisor d
-# the remainder r of the division is to be stored to &__r and
-# the quotient q is to be returned
-
- .text
-ENTRY(__udiv_qrnnd)
- st %r2,24(%r15) # store pointer to remainder for later
- lr %r0,%r3 # reload n
- lr %r1,%r4
- ltr %r2,%r5 # reload and test divisor
- jp 5f
- # divisor >= 0x80000000
- srdl %r0,2 # n/4
- srl %r2,1 # d/2
- slr %r1,%r2 # special case if last bit of d is set
- brc 3,0f # (n/4) div (n/2) can overflow by 1
- ahi %r0,-1 # trick: subtract n/2, then divide
-0: dr %r0,%r2 # signed division
- ahi %r1,1 # trick part 2: add 1 to the quotient
- # now (n >> 2) = (d >> 1) * %r1 + %r0
- lhi %r3,1
- nr %r3,%r1 # test last bit of q
- jz 1f
- alr %r0,%r2 # add (d>>1) to r
-1: srl %r1,1 # q >>= 1
- # now (n >> 2) = (d&-2) * %r1 + %r0
- lhi %r3,1
- nr %r3,%r5 # test last bit of d
- jz 2f
- slr %r0,%r1 # r -= q
- brc 3,2f # borrow ?
- alr %r0,%r5 # r += d
- ahi %r1,-1
-2: # now (n >> 2) = d * %r1 + %r0
- alr %r1,%r1 # q <<= 1
- alr %r0,%r0 # r <<= 1
- brc 12,3f # overflow on r ?
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
-3: lhi %r3,2
- nr %r3,%r4 # test next to last bit of n
- jz 4f
- ahi %r0,1 # r += 1
-4: clr %r0,%r5 # r >= d ?
- jl 6f
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
- # now (n >> 1) = d * %r1 + %r0
- j 6f
-5: # divisor < 0x80000000
- srdl %r0,1
- dr %r0,%r2 # signed division
- # now (n >> 1) = d * %r1 + %r0
-6: alr %r1,%r1 # q <<= 1
- alr %r0,%r0 # r <<= 1
- brc 12,7f # overflow on r ?
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
-7: lhi %r3,1
- nr %r3,%r4 # isolate last bit of n
- alr %r0,%r3 # r += (n & 1)
- clr %r0,%r5 # r >= d ?
- jl 8f
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
-8: # now n = d * %r1 + %r0
- l %r2,24(%r15)
- st %r0,0(%r2)
- lr %r2,%r1
- br %r14
- .end __udiv_qrnnd
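
For reference, the contract of the deleted helper in C terms (illustrative; the GMP-style udiv_qrnnd convention assumes high < d so the quotient fits in 32 bits):

    #include <stdint.h>

    /* divide the 64-bit value (high:low) by d; return the quotient,
     * store the remainder through r */
    static uint32_t udiv_qrnnd(uint32_t *r, uint32_t high, uint32_t low,
                               uint32_t d)
    {
            uint64_t n = ((uint64_t)high << 32) | low;

            *r = (uint32_t)(n % d);
            return (uint32_t)(n / d);
    }

The assembly had to synthesize this from 32-bit signed division, which is why it needed the same kind of halve-and-correct tricks as __div64_31 above.
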
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 53dd5d7..4614d41 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,20 +15,6 @@
#include <asm/mmu_context.h>
#include <asm/facility.h>
-#ifndef CONFIG_64BIT
-#define AHI "ahi"
-#define ALR "alr"
-#define CLR "clr"
-#define LHI "lhi"
-#define SLR "slr"
-#else
-#define AHI "aghi"
-#define ALR "algr"
-#define CLR "clgr"
-#define LHI "lghi"
-#define SLR "slgr"
-#endif
-
static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
@@ -41,29 +27,29 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
"9: jz 7f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
+ "1: algr %0,%3\n"
+ " slgr %1,%3\n"
+ " slgr %2,%3\n"
" j 0b\n"
"2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
" nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 4f\n"
"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
- "10:"SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "4:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ "10:slgr %0,%4\n"
+ " algr %2,%4\n"
+ "4: lghi %4,-1\n"
+ " algr %4,%0\n" /* copy remaining size, subtract 1 */
" bras %3,6f\n" /* memset loop */
" xc 0(1,%2),0(%2)\n"
"5: xc 0(256,%2),0(%2)\n"
" la %2,256(%2)\n"
- "6:"AHI" %4,-256\n"
+ "6: aghi %4,-256\n"
" jnm 5b\n"
" ex %4,0(%3)\n"
" j 8f\n"
- "7:"SLR" %0,%0\n"
+ "7: slgr %0,%0\n"
"8:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
@@ -82,32 +68,32 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
" sacf 0\n"
"0: mvcp 0(%0,%2),0(%1),%3\n"
"10:jz 8f\n"
- "1:"ALR" %0,%3\n"
+ "1: algr %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcp 0(%0,%2),0(%1),%3\n"
"11:jnz 1b\n"
" j 8f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
+ " lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"4: mvcp 0(%4,%2),0(%1),%3\n"
- "12:"SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "5:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ "12:slgr %0,%4\n"
+ " algr %2,%4\n"
+ "5: lghi %4,-1\n"
+ " algr %4,%0\n" /* copy remaining size, subtract 1 */
" bras %3,7f\n" /* memset loop */
" xc 0(1,%2),0(%2)\n"
"6: xc 0(256,%2),0(%2)\n"
" la %2,256(%2)\n"
- "7:"AHI" %4,-256\n"
+ "7: aghi %4,-256\n"
" jnm 6b\n"
" ex %4,0(%3)\n"
" j 9f\n"
- "8:"SLR" %0,%0\n"
+ "8: slgr %0,%0\n"
"9: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
@@ -134,19 +120,19 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
"6: jz 4f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
+ "1: algr %0,%3\n"
+ " slgr %1,%3\n"
+ " slgr %2,%3\n"
" j 0b\n"
"2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
" nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
- "7:"SLR" %0,%4\n"
+ "7: slgr %0,%4\n"
" j 5f\n"
- "4:"SLR" %0,%0\n"
+ "4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
@@ -165,22 +151,22 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
" sacf 0\n"
"0: mvcs 0(%0,%1),0(%2),%3\n"
"7: jz 5f\n"
- "1:"ALR" %0,%3\n"
+ "1: algr %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcs 0(%0,%1),0(%2),%3\n"
"8: jnz 1b\n"
" j 5f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
+ " lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"4: mvcs 0(%4,%1),0(%2),%3\n"
- "9:"SLR" %0,%4\n"
+ "9: slgr %0,%4\n"
" j 6f\n"
- "5:"SLR" %0,%0\n"
+ "5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
@@ -208,11 +194,11 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
" jz 2f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
+ "1: algr %0,%3\n"
+ " slgr %1,%3\n"
+ " slgr %2,%3\n"
" j 0b\n"
- "2:"SLR" %0,%0\n"
+ "2: slgr %0,%0\n"
"3: \n"
EX_TABLE(0b,3b)
: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
@@ -228,23 +214,23 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
load_kernel_asce();
asm volatile(
" sacf 256\n"
- " "AHI" %0,-1\n"
+ " aghi %0,-1\n"
" jo 5f\n"
" bras %3,3f\n"
- "0:"AHI" %0,257\n"
+ "0: aghi %0,257\n"
"1: mvc 0(1,%1),0(%2)\n"
" la %1,1(%1)\n"
" la %2,1(%2)\n"
- " "AHI" %0,-1\n"
+ " aghi %0,-1\n"
" jnz 1b\n"
" j 5f\n"
"2: mvc 0(256,%1),0(%2)\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
- "3:"AHI" %0,-256\n"
+ "3: aghi %0,-256\n"
" jnm 2b\n"
"4: ex %0,1b-0b(%3)\n"
- "5: "SLR" %0,%0\n"
+ "5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
@@ -269,18 +255,18 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
" jz 4f\n"
- "1:"ALR" %0,%2\n"
- " "SLR" %1,%2\n"
+ "1: algr %0,%2\n"
+ " slgr %1,%2\n"
" j 0b\n"
"2: la %3,4095(%1)\n"/* %4 = to + 4095 */
" nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
- " "SLR" %3,%1\n"
- " "CLR" %0,%3\n" /* copy crosses next page boundary? */
+ " slgr %3,%1\n"
+ " clgr %0,%3\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
- " "SLR" %0,%3\n"
+ " slgr %0,%3\n"
" j 5f\n"
- "4:"SLR" %0,%0\n"
+ "4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
@@ -295,28 +281,28 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
load_kernel_asce();
asm volatile(
" sacf 256\n"
- " "AHI" %0,-1\n"
+ " aghi %0,-1\n"
" jo 5f\n"
" bras %3,3f\n"
" xc 0(1,%1),0(%1)\n"
- "0:"AHI" %0,257\n"
+ "0: aghi %0,257\n"
" la %2,255(%1)\n" /* %2 = ptr + 255 */
" srl %2,12\n"
" sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
- " "SLR" %2,%1\n"
- " "CLR" %0,%2\n" /* clear crosses next page boundary? */
+ " slgr %2,%1\n"
+ " clgr %0,%2\n" /* clear crosses next page boundary? */
" jnh 5f\n"
- " "AHI" %2,-1\n"
+ " aghi %2,-1\n"
"1: ex %2,0(%3)\n"
- " "AHI" %2,1\n"
- " "SLR" %0,%2\n"
+ " aghi %2,1\n"
+ " slgr %0,%2\n"
" j 5f\n"
"2: xc 0(256,%1),0(%1)\n"
" la %1,256(%1)\n"
- "3:"AHI" %0,-256\n"
+ "3: aghi %0,-256\n"
" jnm 2b\n"
"4: ex %0,0(%3)\n"
- "5: "SLR" %0,%0\n"
+ "5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
@@ -341,12 +327,12 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
asm volatile(
" la %2,0(%1)\n"
" la %3,0(%0,%1)\n"
- " "SLR" %0,%0\n"
+ " slgr %0,%0\n"
" sacf 256\n"
"0: srst %3,%2\n"
" jo 0b\n"
" la %0,1(%3)\n" /* strnlen_user results includes \0 */
- " "SLR" %0,%1\n"
+ " slgr %0,%1\n"
"1: sacf 768\n"
EX_TABLE(0b,1b)
: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
@@ -399,7 +385,7 @@ early_param("uaccess_primary", parse_uaccess_pt);
static int __init uaccess_init(void)
{
- if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27))
+ if (!uaccess_primary && test_facility(27))
static_key_slow_inc(&have_mvcos);
return 0;
}
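
The have_mvcos key set up here gates the choice between the MVCOS-based and MVCP/MVCS-based helpers seen earlier in the file. A hedged sketch of the consumer pattern (the actual wrappers are outside this hunk; the shape below follows the usual static_key_false() idiom and the helper names visible above):

    unsigned long __copy_from_user(void *to, const void __user *from,
                                   unsigned long n)
    {
            if (static_key_false(&have_mvcos))
                    return copy_from_user_mvcos(to, from, n);  /* facility 27 */
            return copy_from_user_mvcp(to, from, n);
    }

With the 31-bit code gone, the IS_ENABLED(CONFIG_64BIT) guard is always true, so the hunk above can simply drop it.
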
diff --git a/arch/s390/lib/ucmpdi2.c b/arch/s390/lib/ucmpdi2.c
deleted file mode 100644
index 3e05ff5..0000000
--- a/arch/s390/lib/ucmpdi2.c
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <linux/module.h>
-
-union ull_union {
- unsigned long long ull;
- struct {
- unsigned int high;
- unsigned int low;
- } ui;
-};
-
-int __ucmpdi2(unsigned long long a, unsigned long long b)
-{
- union ull_union au = {.ull = a};
- union ull_union bu = {.ull = b};
-
- if (au.ui.high < bu.ui.high)
- return 0;
- else if (au.ui.high > bu.ui.high)
- return 2;
- if (au.ui.low < bu.ui.low)
- return 0;
- else if (au.ui.low > bu.ui.low)
- return 2;
- return 1;
-}
-EXPORT_SYMBOL(__ucmpdi2);
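
__ucmpdi2 is the libgcc comparison helper the 31-bit compiler emitted for 64-bit unsigned compares; its 0/1/2 return convention encodes less-than, equal and greater-than. A standalone restatement of that convention:

    #include <assert.h>

    static int ucmpdi2(unsigned long long a, unsigned long long b)
    {
            return (a > b) - (a < b) + 1;   /* 0: a<b, 1: a==b, 2: a>b */
    }

    int main(void)
    {
            assert(ucmpdi2(1, 2) == 0);
            assert(ucmpdi2(2, 2) == 1);
            assert(ucmpdi2(3, 2) == 2);
            return 0;
    }

With the CONFIG_32BIT build gone there is no caller left, hence the whole-file deletion.
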
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile
deleted file mode 100644
index 51d3995..0000000
--- a/arch/s390/math-emu/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the FPU instruction emulation.
-#
-
-obj-$(CONFIG_MATHEMU) := math.o
-
-ccflags-y := -I$(src) -Iinclude/math-emu -w
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
deleted file mode 100644
index a6ba0d7..0000000
--- a/arch/s390/math-emu/math.c
+++ /dev/null
@@ -1,2255 +0,0 @@
-/*
- * S390 version
- * Copyright IBM Corp. 1999, 2001
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *
- * 'math.c' emulates IEEE instructions on a S390 processor
- * that does not have the IEEE fpu (all processors before G5).
- */
-
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <asm/uaccess.h>
-#include <asm/lowcore.h>
-
-#include <asm/sfp-util.h>
-#include <math-emu/soft-fp.h>
-#include <math-emu/single.h>
-#include <math-emu/double.h>
-#include <math-emu/quad.h>
-
-#define FPC_VALID_MASK 0xF8F8FF03
-
-/*
- * soft-fp lacks a macro to round a floating point number to the
- * nearest integer in the same floating point format, so one is
- * defined here.
- */
-#define _FP_TO_FPINT_ROUND(fs, wc, X) \
- do { \
- switch (X##_c) \
- { \
- case FP_CLS_NORMAL: \
- if (X##_e > _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs) \
- { /* floating point number has no bits after the dot. */ \
- } \
- else if (X##_e <= _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs && \
- X##_e > _FP_EXPBIAS_##fs) \
- { /* some bits before the dot, some after it. */ \
- _FP_FRAC_SRS_##wc(X, _FP_WFRACBITS_##fs, \
- X##_e - _FP_EXPBIAS_##fs \
- + _FP_FRACBITS_##fs); \
- _FP_ROUND(wc, X); \
- _FP_FRAC_SLL_##wc(X, X##_e - _FP_EXPBIAS_##fs \
- + _FP_FRACBITS_##fs); \
- } \
- else \
- { /* all bits after the dot. */ \
- FP_SET_EXCEPTION(FP_EX_INEXACT); \
- X##_c = FP_CLS_ZERO; \
- } \
- break; \
- case FP_CLS_NAN: \
- case FP_CLS_INF: \
- case FP_CLS_ZERO: \
- break; \
- } \
- } while (0)
-
-#define FP_TO_FPINT_ROUND_S(X) _FP_TO_FPINT_ROUND(S,1,X)
-#define FP_TO_FPINT_ROUND_D(X) _FP_TO_FPINT_ROUND(D,2,X)
-#define FP_TO_FPINT_ROUND_Q(X) _FP_TO_FPINT_ROUND(Q,4,X)
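
_FP_TO_FPINT_ROUND rounds a value to an integral value in the same floating point format, honouring the rounding mode; in C99 terms that is rint(), not a cast to an integer type. A quick illustration under the default round-to-nearest-even mode:

    #include <assert.h>
    #include <math.h>

    int main(void)
    {
            assert(rint(2.5) == 2.0);       /* ties go to even */
            assert(rint(3.5) == 4.0);
            assert(rint(-1.2) == -1.0);
            return 0;
    }
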
-
-typedef union {
- long double ld;
- struct {
- __u64 high;
- __u64 low;
- } w;
-} mathemu_ldcv;
-
-#ifdef CONFIG_SYSCTL
-int sysctl_ieee_emulation_warnings=1;
-#endif
-
-#define mathemu_put_user(x, p) \
- do { \
- if (put_user((x),(p))) \
- return SIGSEGV; \
- } while (0)
-
-#define mathemu_get_user(x, p) \
- do { \
- if (get_user((x),(p))) \
- return SIGSEGV; \
- } while (0)
-
-#define mathemu_copy_from_user(d, s, n)\
- do { \
- if (copy_from_user((d),(s),(n)) != 0) \
- return SIGSEGV; \
- } while (0)
-
-#define mathemu_copy_to_user(d, s, n) \
- do { \
- if (copy_to_user((d),(s),(n)) != 0) \
- return SIGSEGV; \
- } while (0)
-
-static void display_emulation_not_implemented(struct pt_regs *regs, char *instr)
-{
- __u16 *location;
-
-#ifdef CONFIG_SYSCTL
- if(sysctl_ieee_emulation_warnings)
-#endif
- {
- location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc);
- printk("%s ieee fpu instruction not emulated "
- "process name: %s pid: %d \n",
- instr, current->comm, current->pid);
- printk("%s's PSW: %08lx %08lx\n", instr,
- (unsigned long) regs->psw.mask,
- (unsigned long) location);
- }
-}
-
-static inline void emu_set_CC (struct pt_regs *regs, int cc)
-{
- regs->psw.mask = (regs->psw.mask & 0xFFFFCFFF) | ((cc&3) << 12);
-}
-
-/*
- * Set the condition code in the user psw.
- * 0 : Result is zero
- * 1 : Result is less than zero
- * 2 : Result is greater than zero
- * 3 : Result is NaN or INF
- */
-static inline void emu_set_CC_cs(struct pt_regs *regs, int class, int sign)
-{
- switch (class) {
- case FP_CLS_NORMAL:
- case FP_CLS_INF:
- emu_set_CC(regs, sign ? 1 : 2);
- break;
- case FP_CLS_ZERO:
- emu_set_CC(regs, 0);
- break;
- case FP_CLS_NAN:
- emu_set_CC(regs, 3);
- break;
- }
-}
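
emu_set_CC() above rewrites the condition code in place: in the first word of the 31-bit PSW the CC occupies bits 12-13 counted from the least significant bit, i.e. mask 0x3000, which is why the old value is cleared with 0xFFFFCFFF. A worked check:

    #include <assert.h>

    int main(void)
    {
            unsigned int psw_mask = 0x070d1000;     /* current cc == 1 */
            int cc = 2;

            psw_mask = (psw_mask & 0xFFFFCFFF) | ((cc & 3) << 12);
            assert(((psw_mask & 0x3000) >> 12) == 2);
            return 0;
    }
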
-
-/* Add long double */
-static int emu_axbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QB, &cvt.ld);
- FP_ADD_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Add double */
-static int emu_adbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_ADD_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Add double */
-static int emu_adb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_ADD_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Add float */
-static int emu_aebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_ADD_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Add float */
-static int emu_aeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_ADD_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Compare long double */
-static int emu_cxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB);
- mathemu_ldcv cvt;
- int IR;
-
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_RAW_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_RAW_QP(QB, &cvt.ld);
- FP_CMP_Q(IR, QA, QB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- return 0;
-}
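
Every comparison helper in this file ends with the same ternary, mapping the soft-fp comparison result IR onto the s390 condition code. Its truth table, as a standalone check:

    #include <assert.h>

    static int ir_to_cc(int ir)
    {
            return (ir == -1) ? 1 : (ir == 1) ? 2 : ir;
    }

    int main(void)
    {
            assert(ir_to_cc(-1) == 1);      /* a < b     */
            assert(ir_to_cc(0)  == 0);      /* a == b    */
            assert(ir_to_cc(1)  == 2);      /* a > b     */
            assert(ir_to_cc(3)  == 3);      /* unordered */
            return 0;
    }
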
-
-/* Compare double */
-static int emu_cdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB);
- int IR;
-
- FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_CMP_D(IR, DA, DB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- return 0;
-}
-
-/* Compare double */
-static int emu_cdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB);
- int IR;
-
- FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_RAW_DP(DB, val);
- FP_CMP_D(IR, DA, DB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- return 0;
-}
-
-/* Compare float */
-static int emu_cebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB);
- int IR;
-
- FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_CMP_S(IR, SA, SB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- return 0;
-}
-
-/* Compare float */
-static int emu_ceb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB);
- int IR;
-
- FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_RAW_SP(SB, val);
- FP_CMP_S(IR, SA, SB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- return 0;
-}
-
-/* Compare and signal long double */
-static int emu_kxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int IR;
-
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_RAW_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QB, &cvt.ld);
- FP_CMP_Q(IR, QA, QB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- if (IR == 3)
- FP_SET_EXCEPTION (FP_EX_INVALID);
- return _fex;
-}
-
-/* Compare and signal double */
-static int emu_kdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB);
- FP_DECL_EX;
- int IR;
-
- FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_CMP_D(IR, DA, DB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- if (IR == 3)
- FP_SET_EXCEPTION (FP_EX_INVALID);
- return _fex;
-}
-
-/* Compare and signal double */
-static int emu_kdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB);
- FP_DECL_EX;
- int IR;
-
- FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_RAW_DP(DB, val);
- FP_CMP_D(IR, DA, DB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- if (IR == 3)
- FP_SET_EXCEPTION (FP_EX_INVALID);
- return _fex;
-}
-
-/* Compare and signal float */
-static int emu_kebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB);
- FP_DECL_EX;
- int IR;
-
- FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_CMP_S(IR, SA, SB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- if (IR == 3)
- FP_SET_EXCEPTION (FP_EX_INVALID);
- return _fex;
-}
-
-/* Compare and signal float */
-static int emu_keb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB);
- FP_DECL_EX;
- int IR;
-
- FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_RAW_SP(SB, val);
- FP_CMP_S(IR, SA, SB, 3);
- /*
- * IR == -1 if DA < DB, IR == 0 if DA == DB,
- * IR == 1 if DA > DB and IR == 3 if unordered
- */
- emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR);
- if (IR == 3)
- FP_SET_EXCEPTION (FP_EX_INVALID);
- return _fex;
-}
-
-/* Convert from fixed long double */
-static int emu_cxfbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- __s32 si;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- si = regs->gprs[ry];
- FP_FROM_INT_Q(QR, si, 32, int);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Convert from fixed double */
-static int emu_cdfbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DR);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- si = regs->gprs[ry];
- FP_FROM_INT_D(DR, si, 32, int);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Convert from fixed float */
-static int emu_cefbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SR);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- si = regs->gprs[ry];
- FP_FROM_INT_S(SR, si, 32, int);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Convert to fixed long double */
-static int emu_cfxbr (struct pt_regs *regs, int rx, int ry, int mask) {
- FP_DECL_Q(QA);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = current->thread.fp_regs.fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_TO_INT_ROUND_Q(si, QA, 32, 1);
- regs->gprs[rx] = si;
- emu_set_CC_cs(regs, QA_c, QA_s);
- return _fex;
-}
-
-/* Convert to fixed double */
-static int emu_cfdbr (struct pt_regs *regs, int rx, int ry, int mask) {
- FP_DECL_D(DA);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = current->thread.fp_regs.fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_TO_INT_ROUND_D(si, DA, 32, 1);
- regs->gprs[rx] = si;
- emu_set_CC_cs(regs, DA_c, DA_s);
- return _fex;
-}
-
-/* Convert to fixed float */
-static int emu_cfebr (struct pt_regs *regs, int rx, int ry, int mask) {
- FP_DECL_S(SA);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = current->thread.fp_regs.fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_TO_INT_ROUND_S(si, SA, 32, 1);
- regs->gprs[rx] = si;
- emu_set_CC_cs(regs, SA_c, SA_s);
- return _fex;
-}
-
-/* Divide long double */
-static int emu_dxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QB, &cvt.ld);
- FP_DIV_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Divide double */
-static int emu_ddbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_DIV_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Divide double */
-static int emu_ddb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_DIV_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Divide float */
-static int emu_debr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_DIV_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Divide float */
-static int emu_deb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_DIV_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Divide to integer double */
-static int emu_didbr (struct pt_regs *regs, int rx, int ry, int mask) {
- display_emulation_not_implemented(regs, "didbr");
- return 0;
-}
-
-/* Divide to integer float */
-static int emu_diebr (struct pt_regs *regs, int rx, int ry, int mask) {
- display_emulation_not_implemented(regs, "diebr");
- return 0;
-}
-
-/* Extract fpc */
-static int emu_efpc (struct pt_regs *regs, int rx, int ry) {
- regs->gprs[rx] = current->thread.fp_regs.fpc;
- return 0;
-}
-
-/* Load and test long double */
-static int emu_ltxbr (struct pt_regs *regs, int rx, int ry) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- mathemu_ldcv cvt;
- FP_DECL_Q(QA);
- FP_DECL_EX;
-
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
- fp_regs->fprs[rx+2].ui = fp_regs->fprs[ry+2].ui;
- emu_set_CC_cs(regs, QA_c, QA_s);
- return _fex;
-}
-
-/* Load and test double */
-static int emu_ltdbr (struct pt_regs *regs, int rx, int ry) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- FP_DECL_D(DA);
- FP_DECL_EX;
-
- FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d);
- fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
- emu_set_CC_cs(regs, DA_c, DA_s);
- return _fex;
-}
-
-/* Load and test float */
-static int emu_ltebr (struct pt_regs *regs, int rx, int ry) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- FP_DECL_S(SA);
- FP_DECL_EX;
-
- FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f);
- fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui;
- emu_set_CC_cs(regs, SA_c, SA_s);
- return _fex;
-}
-
-/* Load complement long double */
-static int emu_lcxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_NEG_Q(QR, QA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Load complement double */
-static int emu_lcdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_NEG_D(DR, DA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Load complement float */
-static int emu_lcebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_NEG_S(SR, SA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Load floating point integer long double */
-static int emu_fixbr (struct pt_regs *regs, int rx, int ry, int mask) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- FP_DECL_Q(QA);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = fp_regs->fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- cvt.w.high = fp_regs->fprs[ry].ui;
- cvt.w.low = fp_regs->fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_TO_FPINT_ROUND_Q(QA);
- FP_PACK_QP(&cvt.ld, QA);
- fp_regs->fprs[rx].ui = cvt.w.high;
- fp_regs->fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Load floating point integer double */
-static int emu_fidbr (struct pt_regs *regs, int rx, int ry, int mask) {
- /* FIXME: rounding mode !! */
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- FP_DECL_D(DA);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = fp_regs->fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d);
- FP_TO_FPINT_ROUND_D(DA);
- FP_PACK_DP(&fp_regs->fprs[rx].d, DA);
- return _fex;
-}
-
-/* Load floating point integer float */
-static int emu_fiebr (struct pt_regs *regs, int rx, int ry, int mask) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- FP_DECL_S(SA);
- FP_DECL_EX;
- __s32 si;
- int mode;
-
- if (mask == 0)
- mode = fp_regs->fpc & 3;
- else if (mask == 1)
- mode = FP_RND_NEAREST;
- else
- mode = mask - 4;
- FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f);
- FP_TO_FPINT_ROUND_S(SA);
- FP_PACK_SP(&fp_regs->fprs[rx].f, SA);
- return _fex;
-}
-
-/* Load lengthened double to long double */
-static int emu_lxdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_CONV (Q, D, 4, 2, QR, DA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Load lengthened double to long double */
-static int emu_lxdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, val);
- FP_CONV (Q, D, 4, 2, QR, DA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Load lengthened float to long double */
-static int emu_lxebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_CONV (Q, S, 4, 1, QR, SA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Load lengthened float to long double */
-static int emu_lxeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, val);
- FP_CONV (Q, S, 4, 1, QR, SA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Load lengthened float to double */
-static int emu_ldebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_CONV (D, S, 2, 1, DR, SA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Load lengthened float to double */
-static int emu_ldeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, val);
- FP_CONV (D, S, 2, 1, DR, SA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Load negative long double */
-static int emu_lnxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- if (QA_s == 0) {
- FP_NEG_Q(QR, QA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- } else {
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- current->thread.fp_regs.fprs[rx+2].ui =
- current->thread.fp_regs.fprs[ry+2].ui;
- }
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Load negative double */
-static int emu_lndbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- if (DA_s == 0) {
- FP_NEG_D(DR, DA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- } else
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Load negative float */
-static int emu_lnebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- if (SA_s == 0) {
- FP_NEG_S(SR, SA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- } else
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Load positive long double */
-static int emu_lpxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- if (QA_s != 0) {
- FP_NEG_Q(QR, QA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- } else {
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- current->thread.fp_regs.fprs[rx+2].ui =
- current->thread.fp_regs.fprs[ry+2].ui;
- }
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Load positive double */
-static int emu_lpdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- if (DA_s != 0) {
- FP_NEG_D(DR, DA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- } else
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Load positive float */
-static int emu_lpebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- if (SA_s != 0) {
- FP_NEG_S(SR, SA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- } else
- current->thread.fp_regs.fprs[rx].ui =
- current->thread.fp_regs.fprs[ry].ui;
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Load rounded long double to double */
-static int emu_ldxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_D(DR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_CONV (D, Q, 2, 4, DR, QA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Load rounded long double to float */
-static int emu_lexbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_S(SR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_CONV (S, Q, 1, 4, SR, QA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Load rounded double to float */
-static int emu_ledbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_CONV (S, D, 1, 2, SR, DA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Multiply long double */
-static int emu_mxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QB, &cvt.ld);
- FP_MUL_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Multiply double */
-static int emu_mdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_MUL_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Multiply double */
-static int emu_mdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_MUL_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Multiply double to long double */
-static int emu_mxdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_CONV (Q, D, 4, 2, QA, DA);
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_CONV (Q, D, 4, 2, QB, DA);
- FP_MUL_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Multiply double to long double */
-static int emu_mxdb (struct pt_regs *regs, int rx, long double *val) {
- FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_UNPACK_QP(QB, val);
- FP_MUL_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- return _fex;
-}
-
-/* Multiply float */
-static int emu_meebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_MUL_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Multiply float */
-static int emu_meeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_MUL_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- return _fex;
-}
-
-/* Multiply float to double */
-static int emu_mdebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_CONV (D, S, 2, 1, DA, SA);
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_CONV (D, S, 2, 1, DB, SA);
- FP_MUL_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Multiply float to double */
-static int emu_mdeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_CONV (D, S, 2, 1, DA, SA);
- FP_UNPACK_SP(SA, val);
- FP_CONV (D, S, 2, 1, DB, SA);
- FP_MUL_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- return _fex;
-}
-
-/* Multiply and add double */
-static int emu_madbr (struct pt_regs *regs, int rx, int ry, int rz) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
- FP_MUL_D(DR, DA, DB);
- FP_ADD_D(DR, DR, DC);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
- return _fex;
-}
-
-/* Multiply and add double */
-static int emu_madb (struct pt_regs *regs, int rx, double *val, int rz) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
- FP_MUL_D(DR, DA, DB);
- FP_ADD_D(DR, DR, DC);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
- return _fex;
-}
-
-/* Multiply and add float */
-static int emu_maebr (struct pt_regs *regs, int rx, int ry, int rz) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
- FP_MUL_S(SR, SA, SB);
- FP_ADD_S(SR, SR, SC);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
- return _fex;
-}
-
-/* Multiply and add float */
-static int emu_maeb (struct pt_regs *regs, int rx, float *val, int rz) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
- FP_MUL_S(SR, SA, SB);
- FP_ADD_S(SR, SR, SC);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
- return _fex;
-}
-
-/* Multiply and subtract double */
-static int emu_msdbr (struct pt_regs *regs, int rx, int ry, int rz) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
- FP_MUL_D(DR, DA, DB);
- FP_SUB_D(DR, DR, DC);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
- return _fex;
-}
-
-/* Multiply and subtract double */
-static int emu_msdb (struct pt_regs *regs, int rx, double *val, int rz) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d);
- FP_MUL_D(DR, DA, DB);
- FP_SUB_D(DR, DR, DC);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR);
- return _fex;
-}
-
-/* Multiply and subtract float */
-static int emu_msebr (struct pt_regs *regs, int rx, int ry, int rz) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
- FP_MUL_S(SR, SA, SB);
- FP_SUB_S(SR, SR, SC);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
- return _fex;
-}
-
-/* Multiply and subtract float */
-static int emu_mseb (struct pt_regs *regs, int rx, float *val, int rz) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f);
- FP_MUL_S(SR, SA, SB);
- FP_SUB_S(SR, SR, SC);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR);
- return _fex;
-}
-
-/* Set floating point control word */
-static int emu_sfpc (struct pt_regs *regs, int rx, int ry) {
- __u32 temp;
-
- temp = regs->gprs[rx];
- if ((temp & ~FPC_VALID_MASK) != 0)
- return SIGILL;
- current->thread.fp_regs.fpc = temp;
- return 0;
-}
-
-/* Square root long double */
-static int emu_sqxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- FP_SQRT_Q(QR, QA);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Square root double */
-static int emu_sqdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d);
- FP_SQRT_D(DR, DA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Square root double */
-static int emu_sqdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, val);
- FP_SQRT_D(DR, DA);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Square root float */
-static int emu_sqebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f);
- FP_SQRT_S(SR, SA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Square root float */
-static int emu_sqeb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, val);
- FP_SQRT_S(SR, SA);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Subtract long double */
-static int emu_sxbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
- FP_DECL_EX;
- mathemu_ldcv cvt;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_QP(QA, &cvt.ld);
- cvt.w.high = current->thread.fp_regs.fprs[ry].ui;
- cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui;
- FP_UNPACK_QP(QB, &cvt.ld);
- FP_SUB_Q(QR, QA, QB);
- FP_PACK_QP(&cvt.ld, QR);
- current->thread.fp_regs.fprs[rx].ui = cvt.w.high;
- current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low;
- emu_set_CC_cs(regs, QR_c, QR_s);
- return _fex;
-}
-
-/* Subtract double */
-static int emu_sdbr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d);
- FP_SUB_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Subtract double */
-static int emu_sdb (struct pt_regs *regs, int rx, double *val) {
- FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- FP_UNPACK_DP(DB, val);
- FP_SUB_D(DR, DA, DB);
- FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR);
- emu_set_CC_cs(regs, DR_c, DR_s);
- return _fex;
-}
-
-/* Subtract float */
-static int emu_sebr (struct pt_regs *regs, int rx, int ry) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f);
- FP_SUB_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Subtract float */
-static int emu_seb (struct pt_regs *regs, int rx, float *val) {
- FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
- FP_DECL_EX;
- int mode;
-
- mode = current->thread.fp_regs.fpc & 3;
- FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- FP_UNPACK_SP(SB, val);
- FP_SUB_S(SR, SA, SB);
- FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR);
- emu_set_CC_cs(regs, SR_c, SR_s);
- return _fex;
-}
-
-/* Test data class long double */
-static int emu_tcxb (struct pt_regs *regs, int rx, long val) {
- FP_DECL_Q(QA);
- mathemu_ldcv cvt;
- int bit;
-
- cvt.w.high = current->thread.fp_regs.fprs[rx].ui;
- cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui;
- FP_UNPACK_RAW_QP(QA, &cvt.ld);
- switch (QA_e) {
- default:
- bit = 8; /* normalized number */
- break;
- case 0:
- if (_FP_FRAC_ZEROP_4(QA))
- bit = 10; /* zero */
- else
- bit = 6; /* denormalized number */
- break;
- case _FP_EXPMAX_Q:
- if (_FP_FRAC_ZEROP_4(QA))
- bit = 4; /* infinity */
- else if (_FP_FRAC_HIGH_RAW_Q(QA) & _FP_QNANBIT_Q)
- bit = 2; /* quiet NAN */
- else
- bit = 0; /* signaling NAN */
- break;
- }
- if (!QA_s)
- bit++;
- emu_set_CC(regs, ((__u32) val >> bit) & 1);
- return 0;
-}
-
-/* Test data class double */
-static int emu_tcdb (struct pt_regs *regs, int rx, long val) {
- FP_DECL_D(DA);
- int bit;
-
- FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d);
- switch (DA_e) {
- default:
- bit = 8; /* normalized number */
- break;
- case 0:
- if (_FP_FRAC_ZEROP_2(DA))
- bit = 10; /* zero */
- else
- bit = 6; /* denormalized number */
- break;
- case _FP_EXPMAX_D:
- if (_FP_FRAC_ZEROP_2(DA))
- bit = 4; /* infinity */
- else if (_FP_FRAC_HIGH_RAW_D(DA) & _FP_QNANBIT_D)
- bit = 2; /* quiet NAN */
- else
- bit = 0; /* signaling NAN */
- break;
- }
- if (!DA_s)
- bit++;
- emu_set_CC(regs, ((__u32) val >> bit) & 1);
- return 0;
-}
-
-/* Test data class float */
-static int emu_tceb (struct pt_regs *regs, int rx, long val) {
- FP_DECL_S(SA);
- int bit;
-
- FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f);
- switch (SA_e) {
- default:
- bit = 8; /* normalized number */
- break;
- case 0:
- if (_FP_FRAC_ZEROP_1(SA))
- bit = 10; /* zero */
- else
- bit = 6; /* denormalized number */
- break;
- case _FP_EXPMAX_S:
- if (_FP_FRAC_ZEROP_1(SA))
- bit = 4; /* infinity */
- else if (_FP_FRAC_HIGH_RAW_S(SA) & _FP_QNANBIT_S)
- bit = 2; /* quiet NAN */
- else
- bit = 0; /* signaling NAN */
- break;
- }
- if (!SA_s)
- bit++;
- emu_set_CC(regs, ((__u32) val >> bit) & 1);
- return 0;
-}
-
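An aside on the three test-data-class handlers above: each IEEE class selects an even bit of the instruction's mask, and a positive sign shifts the selection to the odd neighbour, so the condition code reduces to one mask lookup. A minimal sketch of that lookup (the helper and its names are illustrative, not kernel code):

static int tc_condition(unsigned int mask, int class_bit, int sign)
{
        /* class_bit: 0 sNaN, 2 qNaN, 4 inf, 6 denorm, 8 norm, 10 zero */
        if (!sign)
                class_bit++;    /* positive operands use the odd bit */
        return (mask >> class_bit) & 1;
}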
-static inline void emu_load_regd(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
- return;
- asm volatile( /* load reg from fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " ld 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d)
- : "1");
-}
-
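The guard above works because the hardware-backed registers {0, 2, 4, 6} are exactly the 4-bit register numbers with bits 0 and 3 clear, hence the (reg & 9) != 0 test. A quick self-contained check of that identity (hypothetical helper):

#include <assert.h>

static void check_hw_fpr_mask(void)
{
        int reg;

        for (reg = 0; reg < 16; reg++) {
                int hw = reg == 0 || reg == 2 || reg == 4 || reg == 6;

                assert(((reg & 9) == 0) == hw);
        }
}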
-static inline void emu_load_rege(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
- return;
- asm volatile( /* load reg from fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " le 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
- : "1");
-}
-
-static inline void emu_store_regd(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
- return;
- asm volatile( /* store reg to fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " std 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d)
- : "1");
-}
-
-
-static inline void emu_store_rege(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
- return;
- asm volatile( /* store reg to fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " ste 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
- : "1");
-}
-
-int math_emu_b3(__u8 *opcode, struct pt_regs * regs) {
- int _fex = 0;
- static const __u8 format_table[256] = {
- [0x00] = 0x03,[0x01] = 0x03,[0x02] = 0x03,[0x03] = 0x03,
- [0x04] = 0x0f,[0x05] = 0x0d,[0x06] = 0x0e,[0x07] = 0x0d,
- [0x08] = 0x03,[0x09] = 0x03,[0x0a] = 0x03,[0x0b] = 0x03,
- [0x0c] = 0x0f,[0x0d] = 0x03,[0x0e] = 0x06,[0x0f] = 0x06,
- [0x10] = 0x02,[0x11] = 0x02,[0x12] = 0x02,[0x13] = 0x02,
- [0x14] = 0x03,[0x15] = 0x02,[0x16] = 0x01,[0x17] = 0x03,
- [0x18] = 0x02,[0x19] = 0x02,[0x1a] = 0x02,[0x1b] = 0x02,
- [0x1c] = 0x02,[0x1d] = 0x02,[0x1e] = 0x05,[0x1f] = 0x05,
- [0x40] = 0x01,[0x41] = 0x01,[0x42] = 0x01,[0x43] = 0x01,
- [0x44] = 0x12,[0x45] = 0x0d,[0x46] = 0x11,[0x47] = 0x04,
- [0x48] = 0x01,[0x49] = 0x01,[0x4a] = 0x01,[0x4b] = 0x01,
- [0x4c] = 0x01,[0x4d] = 0x01,[0x53] = 0x06,[0x57] = 0x06,
- [0x5b] = 0x05,[0x5f] = 0x05,[0x84] = 0x13,[0x8c] = 0x13,
- [0x94] = 0x09,[0x95] = 0x08,[0x96] = 0x07,[0x98] = 0x0c,
- [0x99] = 0x0b,[0x9a] = 0x0a
- };
- static const void *jump_table[256]= {
- [0x00] = emu_lpebr,[0x01] = emu_lnebr,[0x02] = emu_ltebr,
- [0x03] = emu_lcebr,[0x04] = emu_ldebr,[0x05] = emu_lxdbr,
- [0x06] = emu_lxebr,[0x07] = emu_mxdbr,[0x08] = emu_kebr,
- [0x09] = emu_cebr, [0x0a] = emu_aebr, [0x0b] = emu_sebr,
- [0x0c] = emu_mdebr,[0x0d] = emu_debr, [0x0e] = emu_maebr,
- [0x0f] = emu_msebr,[0x10] = emu_lpdbr,[0x11] = emu_lndbr,
- [0x12] = emu_ltdbr,[0x13] = emu_lcdbr,[0x14] = emu_sqebr,
- [0x15] = emu_sqdbr,[0x16] = emu_sqxbr,[0x17] = emu_meebr,
- [0x18] = emu_kdbr, [0x19] = emu_cdbr, [0x1a] = emu_adbr,
- [0x1b] = emu_sdbr, [0x1c] = emu_mdbr, [0x1d] = emu_ddbr,
- [0x1e] = emu_madbr,[0x1f] = emu_msdbr,[0x40] = emu_lpxbr,
- [0x41] = emu_lnxbr,[0x42] = emu_ltxbr,[0x43] = emu_lcxbr,
- [0x44] = emu_ledbr,[0x45] = emu_ldxbr,[0x46] = emu_lexbr,
- [0x47] = emu_fixbr,[0x48] = emu_kxbr, [0x49] = emu_cxbr,
- [0x4a] = emu_axbr, [0x4b] = emu_sxbr, [0x4c] = emu_mxbr,
- [0x4d] = emu_dxbr, [0x53] = emu_diebr,[0x57] = emu_fiebr,
- [0x5b] = emu_didbr,[0x5f] = emu_fidbr,[0x84] = emu_sfpc,
- [0x8c] = emu_efpc, [0x94] = emu_cefbr,[0x95] = emu_cdfbr,
- [0x96] = emu_cxfbr,[0x98] = emu_cfebr,[0x99] = emu_cfdbr,
- [0x9a] = emu_cfxbr
- };
-
- switch (format_table[opcode[1]]) {
- case 1: /* RRE format, long double operation */
- if (opcode[3] & 0x22)
- return SIGILL;
- emu_store_regd((opcode[3] >> 4) & 15);
- emu_store_regd(((opcode[3] >> 4) & 15) + 2);
- emu_store_regd(opcode[3] & 15);
- emu_store_regd((opcode[3] & 15) + 2);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *,int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(((opcode[3] >> 4) & 15) + 2);
- emu_load_regd(opcode[3] & 15);
- emu_load_regd((opcode[3] & 15) + 2);
- break;
- case 2: /* RRE format, double operation */
- emu_store_regd((opcode[3] >> 4) & 15);
- emu_store_regd(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(opcode[3] & 15);
- break;
- case 3: /* RRE format, float operation */
- emu_store_rege((opcode[3] >> 4) & 15);
- emu_store_rege(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_rege((opcode[3] >> 4) & 15);
- emu_load_rege(opcode[3] & 15);
- break;
- case 4: /* RRF format, long double operation */
- if (opcode[3] & 0x22)
- return SIGILL;
- emu_store_regd((opcode[3] >> 4) & 15);
- emu_store_regd(((opcode[3] >> 4) & 15) + 2);
- emu_store_regd(opcode[3] & 15);
- emu_store_regd((opcode[3] & 15) + 2);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(((opcode[3] >> 4) & 15) + 2);
- emu_load_regd(opcode[3] & 15);
- emu_load_regd((opcode[3] & 15) + 2);
- break;
- case 5: /* RRF format, double operation */
- emu_store_regd((opcode[2] >> 4) & 15);
- emu_store_regd((opcode[3] >> 4) & 15);
- emu_store_regd(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- emu_load_regd((opcode[2] >> 4) & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(opcode[3] & 15);
- break;
- case 6: /* RRF format, float operation */
- emu_store_rege((opcode[2] >> 4) & 15);
- emu_store_rege((opcode[3] >> 4) & 15);
- emu_store_rege(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- emu_load_rege((opcode[2] >> 4) & 15);
- emu_load_rege((opcode[3] >> 4) & 15);
- emu_load_rege(opcode[3] & 15);
- break;
- case 7: /* RRE format, cxfbr instruction */
- /* call the emulation function */
- if (opcode[3] & 0x20)
- return SIGILL;
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(((opcode[3] >> 4) & 15) + 2);
- break;
- case 8: /* RRE format, cdfbr instruction */
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- break;
- case 9: /* RRE format, cefbr instruction */
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_rege((opcode[3] >> 4) & 15);
- break;
- case 10: /* RRF format, cfxbr instruction */
- if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
- /* mask of { 2,3,8-15 } is invalid */
- return SIGILL;
- if (opcode[3] & 2)
- return SIGILL;
- emu_store_regd(opcode[3] & 15);
- emu_store_regd((opcode[3] & 15) + 2);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- break;
- case 11: /* RRF format, cfdbr instruction */
- if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
- /* mask of { 2,3,8-15 } is invalid */
- return SIGILL;
- emu_store_regd(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- break;
- case 12: /* RRF format, cfebr instruction */
- if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32)
- /* mask of { 2,3,8-15 } is invalid */
- return SIGILL;
- emu_store_rege(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4);
- break;
- case 13: /* RRE format, lxdbr & mxdbr instruction */
- /* double store but long double load */
- if (opcode[3] & 0x20)
- return SIGILL;
- emu_store_regd((opcode[3] >> 4) & 15);
- emu_store_regd(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(((opcode[3] >> 4) & 15) + 2);
- break;
- case 14: /* RRE format, lxebr instruction */
- /* float store but long double load */
- if (opcode[3] & 0x20)
- return SIGILL;
- emu_store_rege((opcode[3] >> 4) & 15);
- emu_store_rege(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- emu_load_regd(((opcode[3] >> 4) & 15) + 2);
- break;
- case 15: /* RRE format, ldebr & mdebr instruction */
- /* float store but double load */
- emu_store_rege((opcode[3] >> 4) & 15);
- emu_store_rege(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- break;
- case 16: /* RRE format, ldxbr instruction */
- /* long double store but double load */
- if (opcode[3] & 2)
- return SIGILL;
- emu_store_regd(opcode[3] & 15);
- emu_store_regd((opcode[3] & 15) + 2);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_regd((opcode[3] >> 4) & 15);
- break;
- case 17: /* RRE format, lexbr instruction */
- /* long double store but float load */
- if (opcode[3] & 2)
- return SIGILL;
- emu_store_regd(opcode[3] & 15);
- emu_store_regd((opcode[3] & 15) + 2);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_rege((opcode[3] >> 4) & 15);
- break;
- case 18: /* RRE format, ledbr instruction */
- /* double store but float load */
- emu_store_regd(opcode[3] & 15);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- emu_load_rege((opcode[3] >> 4) & 15);
- break;
- case 19: /* RRE format, efpc & sfpc instruction */
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, int))
- jump_table[opcode[1]])
- (regs, opcode[3] >> 4, opcode[3] & 15);
- break;
- default: /* invalid operation */
- return SIGILL;
- }
- if (_fex != 0) {
- current->thread.fp_regs.fpc |= _fex;
- if (current->thread.fp_regs.fpc & (_fex << 8))
- return SIGFPE;
- }
- return 0;
-}
-
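math_emu_b3() above keeps two parallel 256-entry tables: format_table describes how an opcode's registers must be staged, jump_table what it computes. A stripped-down sketch of that dispatch shape (the handler type and the invalid-opcode convention are made up):

typedef int (*emu_handler)(int rx, int ry);

static int dispatch(const unsigned char fmt[256], const emu_handler ops[256],
                    unsigned char opc, int rx, int ry)
{
        if (fmt[opc] == 0 || !ops[opc])
                return -1;              /* unknown opcode */
        /* stage registers according to fmt[opc], then: */
        return ops[opc](rx, ry);
}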
-static void* calc_addr(struct pt_regs *regs, int rx, int rb, int disp)
-{
- addr_t addr;
-
- rx &= 15;
- rb &= 15;
- addr = disp & 0xfff;
- addr += (rx != 0) ? regs->gprs[rx] : 0; /* + index */
- addr += (rb != 0) ? regs->gprs[rb] : 0; /* + base */
- return (void*) addr;
-}
-
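calc_addr() above is the standard s390 RX-format effective-address rule: a 12-bit displacement plus optional index and base registers, where register number 0 contributes nothing. The same rule in isolation (gprs[] stands in for regs->gprs; the function name is made up):

static unsigned long rx_effective_addr(const unsigned long gprs[16],
                                       int x, int b, int disp)
{
        unsigned long addr = disp & 0xfff;

        x &= 15;
        b &= 15;
        if (x != 0)
                addr += gprs[x];        /* + index */
        if (b != 0)
                addr += gprs[b];        /* + base */
        return addr;
}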
-int math_emu_ed(__u8 *opcode, struct pt_regs * regs) {
- int _fex = 0;
-
- static const __u8 format_table[256] = {
- [0x04] = 0x06,[0x05] = 0x05,[0x06] = 0x07,[0x07] = 0x05,
- [0x08] = 0x02,[0x09] = 0x02,[0x0a] = 0x02,[0x0b] = 0x02,
- [0x0c] = 0x06,[0x0d] = 0x02,[0x0e] = 0x04,[0x0f] = 0x04,
- [0x10] = 0x08,[0x11] = 0x09,[0x12] = 0x0a,[0x14] = 0x02,
- [0x15] = 0x01,[0x17] = 0x02,[0x18] = 0x01,[0x19] = 0x01,
- [0x1a] = 0x01,[0x1b] = 0x01,[0x1c] = 0x01,[0x1d] = 0x01,
- [0x1e] = 0x03,[0x1f] = 0x03,
- };
- static const void *jump_table[]= {
- [0x04] = emu_ldeb,[0x05] = emu_lxdb,[0x06] = emu_lxeb,
- [0x07] = emu_mxdb,[0x08] = emu_keb, [0x09] = emu_ceb,
- [0x0a] = emu_aeb, [0x0b] = emu_seb, [0x0c] = emu_mdeb,
- [0x0d] = emu_deb, [0x0e] = emu_maeb,[0x0f] = emu_mseb,
- [0x10] = emu_tceb,[0x11] = emu_tcdb,[0x12] = emu_tcxb,
- [0x14] = emu_sqeb,[0x15] = emu_sqdb,[0x17] = emu_meeb,
- [0x18] = emu_kdb, [0x19] = emu_cdb, [0x1a] = emu_adb,
- [0x1b] = emu_sdb, [0x1c] = emu_mdb, [0x1d] = emu_ddb,
- [0x1e] = emu_madb,[0x1f] = emu_msdb
- };
-
- switch (format_table[opcode[5]]) {
- case 1: /* RXE format, double constant */ {
- __u64 *dxb, temp;
- __u32 opc;
-
- emu_store_regd((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_copy_from_user(&temp, dxb, 8);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, double *))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (double *) &temp);
- emu_load_regd((opcode[1] >> 4) & 15);
- break;
- }
- case 2: /* RXE format, float constant */ {
- __u32 *dxb, temp;
- __u32 opc;
-
- emu_store_rege((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_get_user(temp, dxb);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, float *))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (float *) &temp);
- emu_load_rege((opcode[1] >> 4) & 15);
- break;
- }
- case 3: /* RXF format, double constant */ {
- __u64 *dxb, temp;
- __u32 opc;
-
- emu_store_regd((opcode[1] >> 4) & 15);
- emu_store_regd((opcode[4] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_copy_from_user(&temp, dxb, 8);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, double *, int))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (double *) &temp, opcode[4] >> 4);
- emu_load_regd((opcode[1] >> 4) & 15);
- break;
- }
- case 4: /* RXF format, float constant */ {
- __u32 *dxb, temp;
- __u32 opc;
-
- emu_store_rege((opcode[1] >> 4) & 15);
- emu_store_rege((opcode[4] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_get_user(temp, dxb);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, float *, int))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (float *) &temp, opcode[4] >> 4);
- emu_load_rege((opcode[4] >> 4) & 15);
- break;
- }
- case 5: /* RXE format, double constant */
- /* store double and load long double */
- {
- __u64 *dxb, temp;
- __u32 opc;
- if (opcode[1] & 0x20)
- return SIGILL;
- emu_store_regd((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_copy_from_user(&temp, dxb, 8);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, double *))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (double *) &temp);
- emu_load_regd((opcode[1] >> 4) & 15);
- emu_load_regd(((opcode[1] >> 4) & 15) + 2);
- break;
- }
- case 6: /* RXE format, float constant */
- /* store float and load double */
- {
- __u32 *dxb, temp;
- __u32 opc;
- emu_store_rege((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_get_user(temp, dxb);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, float *))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (float *) &temp);
- emu_load_regd((opcode[1] >> 4) & 15);
- break;
- }
- case 7: /* RXE format, float constant */
- /* store float and load long double */
- {
- __u32 *dxb, temp;
- __u32 opc;
- if (opcode[1] & 0x20)
- return SIGILL;
- emu_store_rege((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_get_user(temp, dxb);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, float *))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, (float *) &temp);
- emu_load_regd((opcode[1] >> 4) & 15);
- emu_load_regd(((opcode[1] >> 4) & 15) + 2);
- break;
- }
- case 8: /* RXE format, RX address used as int value */ {
- __u64 dxb;
- __u32 opc;
-
- emu_store_rege((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, long))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, dxb);
- break;
- }
- case 9: /* RXE format, RX address used as int value */ {
- __u64 dxb;
- __u32 opc;
-
- emu_store_regd((opcode[1] >> 4) & 15);
- opc = *((__u32 *) opcode);
- dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, long))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, dxb);
- break;
- }
- case 10: /* RXE format, RX address used as int value */ {
- __u64 dxb;
- __u32 opc;
-
- if ((opcode[1] >> 4) & 2)
- return SIGILL;
- emu_store_regd((opcode[1] >> 4) & 15);
- emu_store_regd(((opcode[1] >> 4) & 15) + 2);
- opc = *((__u32 *) opcode);
- dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc);
- /* call the emulation function */
- _fex = ((int (*)(struct pt_regs *, int, long))
- jump_table[opcode[5]])
- (regs, opcode[1] >> 4, dxb);
- break;
- }
- default: /* invalid operation */
- return SIGILL;
- }
- if (_fex != 0) {
- current->thread.fp_regs.fpc |= _fex;
- if (current->thread.fp_regs.fpc & (_fex << 8))
- return SIGFPE;
- }
- return 0;
-}
-
-/*
- * Emulate LDR Rx,Ry with Rx or Ry not in {0, 2, 4, 6}
- */
-int math_emu_ldr(__u8 *opcode) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u16 opc = *((__u16 *) opcode);
-
- if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */
- /* we got an exception, therefore ry can't be in {0,2,4,6} */
- asm volatile( /* load rx from fp_regs.fprs[ry] */
- " bras 1,0f\n"
- " ld 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].d)
- : "1");
- } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */
- asm volatile ( /* store ry to fp_regs.fprs[rx] */
- " bras 1,0f\n"
- " std 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" ((opc & 0xf) << 4),
- "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d)
- : "1");
- } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
- fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
- return 0;
-}
-
-/*
- * Emulate LER Rx,Ry with Rx or Ry not in {0, 2, 4, 6}
- */
-int math_emu_ler(__u8 *opcode) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u16 opc = *((__u16 *) opcode);
-
- if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */
- /* we got an exception, therefore ry can't be in {0,2,4,6} */
- asm volatile( /* load rx from fp_regs.fprs[ry] */
- " bras 1,0f\n"
- " le 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].f)
- : "1");
- } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */
- asm volatile( /* store ry to fp_regs.fprs[rx] */
- " bras 1,0f\n"
- " ste 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" ((opc & 0xf) << 4),
- "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f)
- : "1");
- } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
- fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
- return 0;
-}
-
-/*
- * Emulate LD R,D(X,B) with R not in {0, 2, 4, 6}
- */
-int math_emu_ld(__u8 *opcode, struct pt_regs * regs) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u32 opc = *((__u32 *) opcode);
- __u64 *dxb;
-
- dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_copy_from_user(&fp_regs->fprs[(opc >> 20) & 0xf].d, dxb, 8);
- return 0;
-}
-
-/*
- * Emulate LE R,D(X,B) with R not in {0, 2, 4, 6}
- */
-int math_emu_le(__u8 *opcode, struct pt_regs * regs) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u32 opc = *((__u32 *) opcode);
- __u32 *mem, *dxb;
-
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f);
- mathemu_get_user(mem[0], dxb);
- return 0;
-}
-
-/*
- * Emulate STD R,D(X,B) with R not in {0, 2, 4, 6}
- */
-int math_emu_std(__u8 *opcode, struct pt_regs * regs) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u32 opc = *((__u32 *) opcode);
- __u64 *dxb;
-
- dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mathemu_copy_to_user(dxb, &fp_regs->fprs[(opc >> 20) & 0xf].d, 8);
- return 0;
-}
-
-/*
- * Emulate STE R,D(X,B) with R not in {0, 2, 4, 6}
- */
-int math_emu_ste(__u8 *opcode, struct pt_regs * regs) {
- s390_fp_regs *fp_regs = &current->thread.fp_regs;
- __u32 opc = *((__u32 *) opcode);
- __u32 *mem, *dxb;
-
- dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc);
- mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f);
- mathemu_put_user(mem[0], dxb);
- return 0;
-}
-
-/*
- * Emulate LFPC D(B)
- */
-int math_emu_lfpc(__u8 *opcode, struct pt_regs *regs) {
- __u32 opc = *((__u32 *) opcode);
- __u32 *dxb, temp;
-
- dxb = (__u32 *) calc_addr(regs, 0, opc >> 12, opc);
- mathemu_get_user(temp, dxb);
- if ((temp & ~FPC_VALID_MASK) != 0)
- return SIGILL;
- current->thread.fp_regs.fpc = temp;
- return 0;
-}
-
-/*
- * Emulate STFPC D(B)
- */
-int math_emu_stfpc(__u8 *opcode, struct pt_regs *regs) {
- __u32 opc = *((__u32 *) opcode);
- __u32 *dxb;
-
- dxb = (__u32 *) calc_addr(regs, 0, opc >> 12, opc);
- mathemu_put_user(current->thread.fp_regs.fpc, dxb);
- return 0;
-}
-
-/*
- * Emulate SRNM D(B)
- */
-int math_emu_srnm(__u8 *opcode, struct pt_regs *regs) {
- __u32 opc = *((__u32 *) opcode);
- __u32 temp;
-
- temp = (__u32) (unsigned long) calc_addr(regs, 0, opc >> 12, opc);
- current->thread.fp_regs.fpc &= ~3;
- current->thread.fp_regs.fpc |= (temp & 3);
- return 0;
-}
-
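Note that math_emu_srnm() above takes the new rounding mode from the low two bits of the computed operand address itself, not from storage at that address. Reduced to the bit manipulation (a hedged sketch):

static unsigned int srnm_new_fpc(unsigned int fpc, unsigned long addr)
{
        return (fpc & ~3U) | (addr & 3U);       /* replace rounding bits */
}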
-/* broken compiler ... */
-long long
-__negdi2 (long long u)
-{
-
- union lll {
- long long ll;
- long s[2];
- };
-
- union lll w, uu;
-
- uu.ll = u;
-
- w.s[1] = -uu.s[1];
- w.s[0] = -uu.s[0] - ((int) w.s[1] != 0);
-
- return w.ll;
-}
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index d46cade..8556d6b 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,9 +18,7 @@ enum address_markers_idx {
KERNEL_END_NR,
VMEMMAP_NR,
VMALLOC_NR,
-#ifdef CONFIG_64BIT
MODULES_NR,
-#endif
};
static struct addr_marker address_markers[] = {
@@ -29,9 +27,7 @@ static struct addr_marker address_markers[] = {
[KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
[VMEMMAP_NR] = {0, "vmemmap Area"},
[VMALLOC_NR] = {0, "vmalloc Area"},
-#ifdef CONFIG_64BIT
[MODULES_NR] = {0, "Modules Area"},
-#endif
{ -1, NULL }
};
@@ -127,12 +123,6 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
}
}
-#ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
-#else
-#define _PMD_PROT_MASK 0
-#endif
-
static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pud_t *pud, unsigned long addr)
{
@@ -145,7 +135,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
if (pmd_large(*pmd)) {
- prot = pmd_val(*pmd) & _PMD_PROT_MASK;
+ prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT;
note_page(m, st, prot, 3);
} else
walk_pte_level(m, st, pmd, addr);
@@ -155,12 +145,6 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
}
}
-#ifdef CONFIG_64BIT
-#define _PUD_PROT_MASK _REGION3_ENTRY_RO
-#else
-#define _PUD_PROT_MASK 0
-#endif
-
static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pgd_t *pgd, unsigned long addr)
{
@@ -173,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pud = pud_offset(pgd, addr);
if (!pud_none(*pud))
if (pud_large(*pud)) {
- prot = pud_val(*pud) & _PUD_PROT_MASK;
+ prot = pud_val(*pud) & _REGION3_ENTRY_RO;
note_page(m, st, prot, 2);
} else
walk_pmd_level(m, st, pud, addr);
@@ -230,13 +214,9 @@ static int pt_dump_init(void)
* kernel ASCE. We need this to keep the page table walker functions
* from accessing non-existent entries.
*/
-#ifdef CONFIG_32BIT
- max_addr = 1UL << 31;
-#else
max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[MODULES_NR].start_address = MODULES_VADDR;
-#endif
address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
address_markers[VMALLOC_NR].start_address = VMALLOC_START;
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
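The replacement code derives the dump ceiling from the ASCE designation type: 1UL << (type * 11 + 31) gives 2^31, 2^42 or 2^53 bytes for types 0 through 2, with type 3 nominally denoting the full 64-bit space (a 64-bit shift would need special-casing in portable C). A worked sketch (helper name is made up):

static unsigned long asce_max_addr(unsigned long type)
{
        /* type 0..3 -> 31, 42, 53, 64 significant address bits */
        return 1UL << (type * 11 + 31);
}

For a region-third table (type 1) this yields 1UL << 42, a 4 TB dump range.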
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 519bba7..23c4969 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -51,7 +51,6 @@ struct qout64 {
struct qrange range[6];
};
-#ifdef CONFIG_64BIT
struct qrange_old {
unsigned int start; /* last byte type */
unsigned int end; /* last byte reserved */
@@ -65,7 +64,6 @@ struct qout64_old {
int segrcnt;
struct qrange_old range[6];
};
-#endif
struct qin64 {
char qopcode;
@@ -103,7 +101,6 @@ static int scode_set;
static int
dcss_set_subcodes(void)
{
-#ifdef CONFIG_64BIT
char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
unsigned long rx, ry;
int rc;
@@ -135,7 +132,6 @@ dcss_set_subcodes(void)
segext_scode = DCSS_SEGEXTX;
return 0;
}
-#endif
/* Diag x'64' new subcodes are not supported, set to old subcodes */
loadshr_scode = DCSS_LOADNOLY;
loadnsr_scode = DCSS_LOADNSR;
@@ -208,7 +204,6 @@ dcss_diag(int *func, void *parameter,
rx = (unsigned long) parameter;
ry = (unsigned long) *func;
-#ifdef CONFIG_64BIT
/* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
if (*func > DCSS_SEGEXT)
asm volatile(
@@ -225,13 +220,6 @@ dcss_diag(int *func, void *parameter,
" ipm %2\n"
" srl %2,28\n"
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#else
- asm volatile(
- " diag %0,%1,0x64\n"
- " ipm %2\n"
- " srl %2,28\n"
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#endif
*ret1 = rx;
*ret2 = ry;
return rc;
@@ -281,7 +269,6 @@ query_segment_type (struct dcss_segment *seg)
goto out_free;
}
-#ifdef CONFIG_64BIT
 /* Only the old format of the Diagnose x'64' output area is supported;
 copy the data into the new format. */
if (segext_scode == DCSS_SEGEXT) {
@@ -307,7 +294,6 @@ query_segment_type (struct dcss_segment *seg)
}
kfree(qout_old);
}
-#endif
if (qout->segcnt > 6) {
rc = -EOPNOTSUPP;
goto out_free;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3ff8653..76515bc 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -36,15 +36,9 @@
#include <asm/facility.h>
#include "../kernel/entry.h"
-#ifndef CONFIG_64BIT
-#define __FAIL_ADDR_MASK 0x7ffff000
-#define __SUBCODE_MASK 0x0200
-#define __PF_RES_FIELD 0ULL
-#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
-#endif /* CONFIG_64BIT */
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
@@ -54,7 +48,6 @@
static unsigned long store_indication __read_mostly;
-#ifdef CONFIG_64BIT
static int __init fault_init(void)
{
if (test_facility(75))
@@ -62,7 +55,6 @@ static int __init fault_init(void)
return 0;
}
early_initcall(fault_init);
-#endif
static inline int notify_page_fault(struct pt_regs *regs)
{
@@ -133,7 +125,6 @@ static int bad_address(void *p)
return probe_kernel_address((unsigned long *)p, dummy);
}
-#ifdef CONFIG_64BIT
static void dump_pagetable(unsigned long asce, unsigned long address)
{
unsigned long *table = __va(asce & PAGE_MASK);
@@ -187,33 +178,6 @@ bad:
pr_cont("BAD\n");
}
-#else /* CONFIG_64BIT */
-
-static void dump_pagetable(unsigned long asce, unsigned long address)
-{
- unsigned long *table = __va(asce & PAGE_MASK);
-
- pr_alert("AS:%08lx ", asce);
- table = table + ((address >> 20) & 0x7ff);
- if (bad_address(table))
- goto bad;
- pr_cont("S:%08lx ", *table);
- if (*table & _SEGMENT_ENTRY_INVALID)
- goto out;
- table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
- table = table + ((address >> 12) & 0xff);
- if (bad_address(table))
- goto bad;
- pr_cont("P:%08lx ", *table);
-out:
- pr_cont("\n");
- return;
-bad:
- pr_cont("BAD\n");
-}
-
-#endif /* CONFIG_64BIT */
-
static void dump_fault_info(struct pt_regs *regs)
{
unsigned long asce;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 5c586c7..1eb41bb 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -106,11 +106,9 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
pmd_t *pmdp, pmd;
pmdp = (pmd_t *) pudp;
-#ifdef CONFIG_64BIT
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pmdp = (pmd_t *) pud_deref(pud);
pmdp += pmd_index(addr);
-#endif
do {
pmd = *pmdp;
barrier();
@@ -145,11 +143,9 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
pud_t *pudp, pud;
pudp = (pud_t *) pgdp;
-#ifdef CONFIG_64BIT
if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
pudp = (pud_t *) pgd_deref(pgd);
pudp += pud_index(addr);
-#endif
do {
pud = *pudp;
barrier();
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d35b151..80875c4 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -105,7 +105,6 @@ void __init paging_init(void)
unsigned long pgd_type, asce_bits;
init_mm.pgd = swapper_pg_dir;
-#ifdef CONFIG_64BIT
if (VMALLOC_END > (1UL << 42)) {
asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION2_ENTRY_EMPTY;
@@ -113,10 +112,6 @@ void __init paging_init(void)
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
}
-#else
- asce_bits = _ASCE_TABLE_LENGTH;
- pgd_type = _SEGMENT_ENTRY_EMPTY;
-#endif
S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 2eb34bd..8a993a5 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -1,7 +1,7 @@
/*
* Access kernel memory without faulting -- s390 specific implementation.
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2015
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
@@ -16,51 +16,55 @@
#include <asm/ctl_reg.h>
#include <asm/io.h>
-/*
- * This function writes to kernel memory bypassing DAT and possible
- * write protection. It copies one to four bytes from src to dst
- * using the stura instruction.
- * Returns the number of bytes copied or -EFAULT.
- */
-static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
+static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
- unsigned long count, aligned;
- int offset, mask;
- int rc = -EFAULT;
+ unsigned long aligned, offset, count;
+ char tmp[8];
- aligned = (unsigned long) dst & ~3UL;
- offset = (unsigned long) dst & 3;
- count = min_t(unsigned long, 4 - offset, size);
- mask = (0xf << (4 - count)) & 0xf;
- mask >>= offset;
+ aligned = (unsigned long) dst & ~7UL;
+ offset = (unsigned long) dst & 7UL;
+ size = min(8UL - offset, size);
+ count = size - 1;
asm volatile(
" bras 1,0f\n"
- " icm 0,0,0(%3)\n"
- "0: l 0,0(%1)\n"
- " lra %1,0(%1)\n"
- "1: ex %2,0(1)\n"
- "2: stura 0,%1\n"
- " la %0,0\n"
- "3:\n"
- EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
- : "+d" (rc), "+a" (aligned)
- : "a" (mask), "a" (src) : "cc", "memory", "0", "1");
- return rc ? rc : count;
+ " mvc 0(1,%4),0(%5)\n"
+ "0: mvc 0(8,%3),0(%0)\n"
+ " ex %1,0(1)\n"
+ " lg %1,0(%3)\n"
+ " lra %0,0(%0)\n"
+ " sturg %1,%0\n"
+ : "+&a" (aligned), "+&a" (count), "=m" (tmp)
+ : "a" (&tmp), "a" (&tmp[offset]), "a" (src)
+ : "cc", "memory", "1");
+ return size;
}
-long probe_kernel_write(void *dst, const void *src, size_t size)
+/*
+ * s390_kernel_write - write to kernel memory bypassing DAT
+ * @dst: destination address
+ * @src: source address
+ * @size: number of bytes to copy
+ *
+ * This function writes to kernel memory bypassing DAT and possible page table
+ * write protection. It writes to the destination using the sturg instruction.
+ * Therefore we have a read-modify-write sequence: the function reads eight
+ * bytes from destination at an eight byte boundary, modifies the bytes
+ * requested and writes the result back in a loop.
+ *
+ * Note: this means that this function may not be called concurrently on
+ * several cpus with overlapping words, since this may potentially
+ * cause data corruption.
+ */
+void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
- long copied = 0;
+ long copied;
while (size) {
- copied = probe_kernel_write_odd(dst, src, size);
- if (copied < 0)
- break;
+ copied = s390_kernel_write_odd(dst, src, size);
dst += copied;
src += copied;
size -= copied;
}
- return copied < 0 ? -EFAULT : 0;
}
static int __memcpy_real(void *dest, void *src, size_t count)
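The rewritten s390_kernel_write_odd() always operates on the single aligned doubleword containing dst: it handles at most the bytes up to the next 8-byte boundary and returns how many, so the caller's loop advances in boundary-sized steps. The chunking arithmetic on its own (illustrative helper):

static unsigned long kernel_write_chunk(unsigned long dst, unsigned long size)
{
        unsigned long room = 8UL - (dst & 7UL);

        return room < size ? room : size;
}

A 5-byte write at 0x1006, for example, splits into a 2-byte and then a 3-byte read-modify-write of the two doublewords involved.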
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 5535cfe..0f36043 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -36,10 +36,6 @@ void __init detect_memory_memblock(void)
memsize = rzm * rnmax;
if (!rzm)
rzm = 1ULL << 17;
- if (IS_ENABLED(CONFIG_32BIT)) {
- rzm = min(ADDR2G, rzm);
- memsize = min(ADDR2G, memsize);
- }
max_physmem_end = memsize;
addr = 0;
/* keep memblock lists close to the kernel */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 179a2c2..6e552af 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -32,7 +32,7 @@
#include <asm/pgalloc.h>
unsigned long mmap_rnd_mask;
-unsigned long mmap_align_mask;
+static unsigned long mmap_align_mask;
static unsigned long stack_maxrandom_size(void)
{
@@ -60,22 +60,20 @@ static inline int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
if (is_32bit_task())
return (get_random_int() & 0x7ff) << PAGE_SHIFT;
else
return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}
-static unsigned long mmap_base_legacy(void)
+static unsigned long mmap_base_legacy(unsigned long rnd)
{
- return TASK_UNMAPPED_BASE + mmap_rnd();
+ return TASK_UNMAPPED_BASE + rnd;
}
-static inline unsigned long mmap_base(void)
+static inline unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
@@ -84,7 +82,7 @@ static inline unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
gap &= PAGE_MASK;
- return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
+ return STACK_TOP - stack_maxrandom_size() - rnd - gap;
}
unsigned long
@@ -179,40 +177,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
}
-unsigned long randomize_et_dyn(void)
-{
- unsigned long base;
-
- base = STACK_TOP / 3 * 2;
- if (!is_32bit_task())
- /* Align to 4GB */
- base &= ~((1UL << 32) - 1);
- return base + mmap_rnd();
-}
-
-#ifndef CONFIG_64BIT
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
-}
-
-#else
-
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
@@ -273,15 +237,20 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
/*
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
*/
if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
+ mm->mmap_base = mmap_base_legacy(random_factor);
mm->get_unmapped_area = s390_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
+ mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
}
}
@@ -317,5 +286,3 @@ static int __init setup_mmap_rnd(void)
return 0;
}
early_initcall(setup_mmap_rnd);
-
-#endif
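After this rework the random factor is computed once per exec in arch_pick_mmap_layout() and passed down, instead of each base helper calling mmap_rnd() itself. The resulting selection logic, boiled down (parameter names are stand-ins for the kernel's constants):

static unsigned long pick_base(int legacy, unsigned long rnd,
                               unsigned long unmapped_base,
                               unsigned long stack_top, unsigned long max_rnd,
                               unsigned long gap)
{
        if (legacy)
                return unmapped_base + rnd;             /* bottom-up */
        return stack_top - max_rnd - rnd - gap;         /* top-down */
}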
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 426c9d4..749c984 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -109,7 +109,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
int i;
- if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
+ if (test_facility(13)) {
__ptep_ipte_range(address, nr - 1, pte);
return;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b2c1542..33f5894 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -27,14 +27,8 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
-#ifndef CONFIG_64BIT
-#define ALLOC_ORDER 1
-#define FRAG_MASK 0x0f
-#else
#define ALLOC_ORDER 2
#define FRAG_MASK 0x03
-#endif
-
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
@@ -50,7 +44,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
free_pages((unsigned long) table, ALLOC_ORDER);
}
-#ifdef CONFIG_64BIT
static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
@@ -140,7 +133,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
if (current->active_mm == mm)
set_user_asce(mm);
}
-#endif
#ifdef CONFIG_PGSTE
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b1593c2..ef7d6c8 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -38,12 +38,10 @@ static inline pud_t *vmem_pud_alloc(void)
{
pud_t *pud = NULL;
-#ifdef CONFIG_64BIT
pud = vmem_alloc_pages(2);
if (!pud)
return NULL;
clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
return pud;
}
@@ -51,12 +49,10 @@ static inline pmd_t *vmem_pmd_alloc(void)
{
pmd_t *pmd = NULL;
-#ifdef CONFIG_64BIT
pmd = vmem_alloc_pages(2);
if (!pmd)
return NULL;
clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
return pmd;
}
@@ -98,7 +94,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pgd_populate(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
pud_val(*pu_dir) = __pa(address) |
@@ -115,7 +111,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pud_populate(&init_mm, pu_dir, pm_dir);
}
pm_dir = pmd_offset(pu_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
pmd_val(*pm_dir) = __pa(address) |
@@ -222,7 +218,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
-#ifdef CONFIG_64BIT
/* Use 1MB frames for vmemmap if available. We always
* use large frames even if they are only partially
* used.
@@ -240,7 +235,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
-#endif
pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index 524c4b6..1bd2301 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -7,4 +7,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
-oprofile-$(CONFIG_64BIT) += hwsampler.o
+oprofile-y += hwsampler.o
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 9ffe645..bc927a0 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -21,8 +21,6 @@
extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
-#ifdef CONFIG_64BIT
-
#include "hwsampler.h"
#include "op_counter.h"
@@ -495,14 +493,10 @@ static void oprofile_hwsampler_exit(void)
hwsampler_shutdown();
}
-#endif /* CONFIG_64BIT */
-
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
ops->backtrace = s390_backtrace;
-#ifdef CONFIG_64BIT
-
/*
* -ENODEV is not reported to the caller. The module itself
* will use the timer mode sampling as fallback and this is
@@ -511,14 +505,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
hwsampler_available = oprofile_hwsampler_init(ops) == 0;
return 0;
-#else
- return -ENODEV;
-#endif
}
void oprofile_arch_exit(void)
{
-#ifdef CONFIG_64BIT
oprofile_hwsampler_exit();
-#endif
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index f0b8544..9833620 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -780,8 +780,8 @@ static int zpci_scan_bus(struct zpci_dev *zdev)
zpci_cleanup_bus_resources(zdev);
return -EIO;
}
-
zdev->bus->max_bus_speed = zdev->max_bus_speed;
+ pci_bus_add_devices(zdev->bus);
return 0;
}
@@ -913,8 +913,7 @@ static int __init pci_base_init(void)
if (!s390_pci_probe)
return 0;
- if (!test_facility(2) || !test_facility(69)
- || !test_facility(71) || !test_facility(72))
+ if (!test_facility(69) || !test_facility(71) || !test_facility(72))
return 0;
rc = zpci_debug_init();
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index eb4ef27..50057fe 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -162,6 +162,10 @@ config NEED_DMA_MAP_STATE
config NEED_SG_DMA_LENGTH
def_bool y
+config PGTABLE_LEVELS
+ default 3 if X2TLB
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 1bc09ee..d5462b7 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -58,20 +58,23 @@ static void pcibios_scanbus(struct pci_channel *hose)
need_domain_info = need_domain_info || hose->index;
hose->need_domain_info = need_domain_info;
- if (bus) {
- next_busno = bus->busn_res.end + 1;
- /* Don't allow 8-bit bus number overflow inside the hose -
- reserve some space for bridges. */
- if (next_busno > 224) {
- next_busno = 0;
- need_domain_info = 1;
- }
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
- } else {
+ if (!bus) {
pci_free_resource_list(&resources);
+ return;
+ }
+
+ next_busno = bus->busn_res.end + 1;
+ /* Don't allow 8-bit bus number overflow inside the hose -
+ reserve some space for bridges. */
+ if (next_busno > 224) {
+ next_busno = 0;
+ need_domain_info = 1;
}
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+ pci_bus_add_devices(bus);
}
/*
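
This hunk is one instance of a pattern repeated across this series (see the leon_pci.c, pcic.c and sparc pci.c hunks, and the tile and unicore32 ones further down): pci_bus_add_devices() no longer runs inside the core scan, so each arch calls it explicitly, and only after bridge windows are sized and resources assigned. A condensed sketch of the resulting ordering, error handling trimmed, using the real PCI core API:

	struct pci_bus *bus;

	bus = pci_scan_root_bus(parent, busnr, ops, sysdata, &resources);
	if (!bus) {
		pci_free_resource_list(&resources);
		return;
	}
	pci_bus_size_bridges(bus);	/* size bridge windows first */
	pci_bus_assign_resources(bus);	/* then assign BARs and windows */
	pci_bus_add_devices(bus);	/* only now bind drivers */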
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 67a049e..9d209a0 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -993,7 +993,7 @@ static struct unwinder dwarf_unwinder = {
.rating = 150,
};
-static void dwarf_unwinder_cleanup(void)
+static void __init dwarf_unwinder_cleanup(void)
{
struct dwarf_fde *fde, *next_fde;
struct dwarf_cie *cie, *next_cie;
@@ -1009,6 +1009,10 @@ static void dwarf_unwinder_cleanup(void)
rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node)
kfree(cie);
+ if (dwarf_reg_pool)
+ mempool_destroy(dwarf_reg_pool);
+ if (dwarf_frame_pool)
+ mempool_destroy(dwarf_frame_pool);
kmem_cache_destroy(dwarf_reg_cachep);
kmem_cache_destroy(dwarf_frame_cachep);
}
@@ -1176,17 +1180,13 @@ static int __init dwarf_unwinder_init(void)
sizeof(struct dwarf_reg), 0,
SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
- dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
- mempool_alloc_slab,
- mempool_free_slab,
- dwarf_frame_cachep);
+ dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
+ dwarf_frame_cachep);
if (!dwarf_frame_pool)
goto out;
- dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
- mempool_alloc_slab,
- mempool_free_slab,
- dwarf_reg_cachep);
+ dwarf_reg_pool = mempool_create_slab_pool(DWARF_REG_MIN_REQ,
+ dwarf_reg_cachep);
if (!dwarf_reg_pool)
goto out;
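
mempool_create_slab_pool() is a convenience wrapper that pins the allocator pair to mempool_alloc_slab/mempool_free_slab, so the two spellings below are equivalent; the short form just drops the boilerplate arguments:

	/* long form */
	pool = mempool_create(DWARF_FRAME_MIN_REQ, mempool_alloc_slab,
			      mempool_free_slab, dwarf_frame_cachep);
	/* equivalent short form used by this patch */
	pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ, dwarf_frame_cachep);

The explicit NULL checks added before mempool_destroy() in the cleanup hunk are needed because mempool_destroy() at this point dereferences its argument unconditionally.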
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 0b34f2a..9729289 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -329,8 +329,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
if (err)
return -EFAULT;
- set_fs(USER_DS);
-
pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
@@ -408,8 +406,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
if (err)
return -EFAULT;
- set_fs(USER_DS);
-
pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 71993c6..0462995 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -457,8 +457,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
- set_fs(USER_DS);
-
/* Broken %016Lx */
pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
signal, current->comm, current->pid, frame,
@@ -547,8 +545,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
- set_fs(USER_DS);
-
pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
signal, current->comm, current->pid, frame,
regs->pc >> 32, regs->pc & 0xffffffff,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 96ac69c..e49502a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -86,6 +86,9 @@ config ARCH_DEFCONFIG
default "arch/sparc/configs/sparc32_defconfig" if SPARC32
default "arch/sparc/configs/sparc64_defconfig" if SPARC64
+config ARCH_PROC_KCORE_TEXT
+ def_bool y
+
config IOMMU_HELPER
bool
default y if SPARC64
@@ -143,6 +146,10 @@ config GENERIC_ISA_DMA
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y if SPARC64
+config PGTABLE_LEVELS
+ default 4 if 64BIT
+ default 3
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 4f6725f..f5b6537 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
unsigned long reg_val);
#endif
+
+#define HV_FAST_M7_GET_PERFREG 0x43
+#define HV_FAST_M7_SET_PERFREG 0x44
+
+#ifndef __ASSEMBLY__
+unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
+ unsigned long *reg_val);
+unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
+ unsigned long reg_val);
+#endif
+
/* Function numbers for HV_CORE_TRAP. */
#define HV_CORE_SET_VER 0x00
#define HV_CORE_PUTCHAR 0x01
@@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
#define HV_GRP_SDIO 0x0108
#define HV_GRP_SDIO_ERR 0x0109
#define HV_GRP_REBOOT_DATA 0x0110
+#define HV_GRP_M7_PERF 0x0114
#define HV_GRP_NIAG_PERF 0x0200
#define HV_GRP_FIRE_PERF 0x0201
#define HV_GRP_N2_CPU 0x0202
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9b672be..50d4840 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -407,16 +407,16 @@ static inline void iounmap(volatile void __iomem *addr)
{
}
-#define ioread8(X) readb(X)
-#define ioread16(X) readw(X)
-#define ioread16be(X) __raw_readw(X)
-#define ioread32(X) readl(X)
-#define ioread32be(X) __raw_readl(X)
-#define iowrite8(val,X) writeb(val,X)
-#define iowrite16(val,X) writew(val,X)
-#define iowrite16be(val,X) __raw_writew(val,X)
-#define iowrite32(val,X) writel(val,X)
-#define iowrite32be(val,X) __raw_writel(val,X)
+#define ioread8 readb
+#define ioread16 readw
+#define ioread16be __raw_readw
+#define ioread32 readl
+#define ioread32be __raw_readl
+#define iowrite8 writeb
+#define iowrite16 writew
+#define iowrite16be __raw_writew
+#define iowrite32 writel
+#define iowrite32be __raw_writel
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr);
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index ec2e2e2..cc9b04a 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -1,7 +1,7 @@
#ifndef _ASM_SPARC_JUMP_LABEL_H
#define _ASM_SPARC_JUMP_LABEL_H
-#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -22,8 +22,6 @@ l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
typedef u32 jump_label_t;
struct jump_entry {
@@ -32,4 +30,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/sparc/include/asm/starfire.h b/arch/sparc/include/asm/starfire.h
index c100dc2..176fa0a 100644
--- a/arch/sparc/include/asm/starfire.h
+++ b/arch/sparc/include/asm/starfire.h
@@ -12,7 +12,6 @@
extern int this_is_starfire;
void check_if_starfire(void);
-int starfire_hard_smp_processor_id(void);
void starfire_hookup(int);
unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 88d322b..07cc49e 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -98,11 +98,7 @@ void sun4v_do_mna(struct pt_regs *regs,
void do_privop(struct pt_regs *regs);
void do_privact(struct pt_regs *regs);
void do_cee(struct pt_regs *regs);
-void do_cee_tl1(struct pt_regs *regs);
-void do_dae_tl1(struct pt_regs *regs);
-void do_iae_tl1(struct pt_regs *regs);
void do_div0_tl1(struct pt_regs *regs);
-void do_fpdis_tl1(struct pt_regs *regs);
void do_fpieee_tl1(struct pt_regs *regs);
void do_fpother_tl1(struct pt_regs *regs);
void do_ill_tl1(struct pt_regs *regs);
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index 5c55145..662500f 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -48,6 +48,7 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_VT_CPU, },
{ .group = HV_GRP_T5_CPU, },
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
+ { .group = HV_GRP_M7_PERF, },
};
static DEFINE_SPINLOCK(hvapi_lock);
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index caedf83..afbaba5 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg)
retl
nop
ENDPROC(sun4v_t5_set_perfreg)
+
+ENTRY(sun4v_m7_get_perfreg)
+ mov %o1, %o4
+ mov HV_FAST_M7_GET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ENDPROC(sun4v_m7_get_perfreg)
+
+ENTRY(sun4v_m7_set_perfreg)
+ mov HV_FAST_M7_SET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_m7_set_perfreg)
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index 899b720..4371f72 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -34,15 +34,17 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
root_bus = pci_scan_root_bus(&ofdev->dev, 0, info->ops, info,
&resources);
- if (root_bus) {
- /* Setup IRQs of all devices using custom routines */
- pci_fixup_irqs(pci_common_swizzle, info->map_irq);
-
- /* Assign devices with resources */
- pci_assign_unassigned_resources();
- } else {
+ if (!root_bus) {
pci_free_resource_list(&resources);
+ return;
}
+
+ /* Setup IRQs of all devices using custom routines */
+ pci_fixup_irqs(pci_common_swizzle, info->map_irq);
+
+ /* Assign devices with resources */
+ pci_assign_unassigned_resources();
+ pci_bus_add_devices(root_bus);
}
void pcibios_fixup_bus(struct pci_bus *pbus)
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 99632a8..26c80e1 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -130,26 +130,26 @@ static struct mdesc_mem_ops memblock_mdesc_ops = {
static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
unsigned int handle_size;
+ struct mdesc_handle *hp;
+ unsigned long addr;
void *base;
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
+ /*
+ * Allocation has to succeed because an mdesc update would otherwise
+ * be missed, and such events are not retransmitted.
+ */
base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
- if (base) {
- struct mdesc_handle *hp;
- unsigned long addr;
-
- addr = (unsigned long)base;
- addr = (addr + 15UL) & ~15UL;
- hp = (struct mdesc_handle *) addr;
+ addr = (unsigned long)base;
+ addr = (addr + 15UL) & ~15UL;
+ hp = (struct mdesc_handle *) addr;
- mdesc_handle_init(hp, handle_size, base);
- return hp;
- }
+ mdesc_handle_init(hp, handle_size, base);
- return NULL;
+ return hp;
}
static void mdesc_kfree(struct mdesc_handle *hp)
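
Two things are going on in the rewritten allocator: __GFP_NOFAIL makes kmalloc() block until the request can be satisfied rather than return NULL, which is why the NULL branch could be dropped, and the +15/&~15 arithmetic is the standard align-up idiom. A sketch of just the alignment step, assuming a 16-byte alignment requirement for the handle:

	void *base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
	unsigned long addr = ((unsigned long)base + 15UL) & ~15UL;
	struct mdesc_handle *hp = (struct mdesc_handle *)addr;	/* 16-byte aligned */

Note that base, not hp, is the pointer that must eventually reach kfree(), which is why mdesc_handle_init() records it.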
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 9ce5afe..6f7251f 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -639,10 +639,7 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- if (pci_claim_resource(dev, i) == 0)
- continue;
-
- pci_claim_bridge_resource(dev, i);
+ pci_claim_resource(dev, i);
}
}
@@ -677,11 +674,10 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
}
pci_of_scan_bus(pbm, node, bus);
- pci_bus_add_devices(bus);
pci_bus_register_of_sysfs(bus);
pci_claim_bus_resources(bus);
-
+ pci_bus_add_devices(bus);
return bus;
}
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 6cc78c2..24384e1 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -391,12 +391,16 @@ static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic)
struct linux_pbm_info *pbm = &pcic->pbm;
pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, &pcic_ops, pbm);
+ if (!pbm->pci_bus)
+ return;
+
#if 0 /* deadwood transplanted from sparc64 */
pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
pci_record_assignments(pbm, pbm->pci_bus);
pci_assign_unassigned(pbm, pbm->pci_bus);
pci_fixup_irq(pbm, pbm->pci_bus);
#endif
+ pci_bus_add_devices(pbm->pci_bus);
}
/*
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 7e967c8..eb978c7 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = {
.pcr_nmi_disable = PCR_N4_PICNPT,
};
+static u64 m7_pcr_read(unsigned long reg_num)
+{
+ unsigned long val;
+
+ (void) sun4v_m7_get_perfreg(reg_num, &val);
+
+ return val;
+}
+
+static void m7_pcr_write(unsigned long reg_num, u64 val)
+{
+ (void) sun4v_m7_set_perfreg(reg_num, val);
+}
+
+static const struct pcr_ops m7_pcr_ops = {
+ .read_pcr = m7_pcr_read,
+ .write_pcr = m7_pcr_write,
+ .read_pic = n4_pic_read,
+ .write_pic = n4_pic_write,
+ .nmi_picl_value = n4_picl_value,
+ .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
+ PCR_N4_UTRACE | PCR_N4_TOE |
+ (26 << PCR_N4_SL_SHIFT)),
+ .pcr_nmi_disable = PCR_N4_PICNPT,
+};
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
@@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void)
perf_hsvc_group = HV_GRP_T5_CPU;
break;
+ case SUN4V_CHIP_SPARC_M7:
+ perf_hsvc_group = HV_GRP_M7_PERF;
+ break;
+
default:
return -ENODEV;
}
@@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void)
pcr_ops = &n5_pcr_ops;
break;
+ case SUN4V_CHIP_SPARC_M7:
+ pcr_ops = &m7_pcr_ops;
+ break;
+
default:
ret = -ENODEV;
break;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 46a5e45..86eebfa 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
.num_pic_regs = 4,
};
+static void sparc_m7_write_pmc(int idx, u64 val)
+{
+ u64 pcr;
+
+ pcr = pcr_ops->read_pcr(idx);
+ /* ensure ov and ntc are reset */
+ pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
+
+ pcr_ops->write_pic(idx, val & 0xffffffff);
+
+ pcr_ops->write_pcr(idx, pcr);
+}
+
+static const struct sparc_pmu sparc_m7_pmu = {
+ .event_map = niagara4_event_map,
+ .cache_map = &niagara4_cache_map,
+ .max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
+ .read_pmc = sparc_vt_read_pmc,
+ .write_pmc = sparc_m7_write_pmc,
+ .upper_shift = 5,
+ .lower_shift = 5,
+ .event_mask = 0x7ff,
+ .user_bit = PCR_N4_UTRACE,
+ .priv_bit = PCR_N4_STRACE,
+
+ /* We explicitly don't support hypervisor tracing. */
+ .hv_bit = 0,
+
+ .irq_bit = PCR_N4_TOE,
+ .upper_nop = 0,
+ .lower_nop = 0,
+ .flags = 0,
+ .max_hw_events = 4,
+ .num_pcrs = 4,
+ .num_pic_regs = 4,
+};
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx)
@@ -960,6 +996,8 @@ out:
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
}
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
/* On this PMU each PIC has its own PCR control register. */
static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
{
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
struct perf_event *cp = cpuc->event[i];
struct hw_perf_event *hwc = &cp->hw;
int idx = hwc->idx;
- u64 enc;
if (cpuc->current_idx[i] != PIC_NO_INDEX)
continue;
- sparc_perf_event_set_period(cp, hwc, idx);
cpuc->current_idx[i] = idx;
- enc = perf_event_get_enc(cpuc->events[i]);
- cpuc->pcr[idx] &= ~mask_for_index(idx);
- if (hwc->state & PERF_HES_STOPPED)
- cpuc->pcr[idx] |= nop_for_index(idx);
- else
- cpuc->pcr[idx] |= event_encoding(enc, idx);
+ sparc_pmu_start(cp, PERF_EF_RELOAD);
}
out:
for (i = 0; i < cpuc->n_events; i++) {
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
int i;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event[i]) {
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
}
}
- perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
unsigned long flags;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
if (n0 >= sparc_pmu->max_hw_events)
@@ -1394,7 +1422,6 @@ nocheck:
ret = 0;
out:
- perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
sparc_pmu = &niagara4_pmu;
return true;
}
+ if (!strcmp(sparc_pmu_type, "sparc-m7")) {
+ sparc_pmu = &sparc_m7_pmu;
+ return true;
+ }
return false;
}
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 0be7bf9..46a5964 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
gp->tpc, gp->o7, gp->i7, gp->rpc);
}
+
+ touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
(cpu == this_cpu ? '*' : ' '), cpu,
pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
+
+ touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index da6f1a7..61139d9 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1406,11 +1406,32 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
scheduler_ipi();
}
-/* This is a nop because we capture all other cpus
- * anyways when making the PROM active.
- */
+static void stop_this_cpu(void *dummy)
+{
+ prom_stopself();
+}
+
void smp_send_stop(void)
{
+ int cpu;
+
+ if (tlb_type == hypervisor) {
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+#ifdef CONFIG_SUN_LDOMS
+ if (ldom_domaining_enabled) {
+ unsigned long hv_err;
+ hv_err = sun4v_cpu_stop(cpu);
+ if (hv_err)
+ printk(KERN_ERR "sun4v_cpu_stop() "
+ "failed err=%lu\n", hv_err);
+ } else
+#endif
+ prom_stopcpu_cpuid(cpu);
+ }
+ } else
+ smp_call_function(stop_this_cpu, NULL, 0);
}
/**
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c
index 82281a5..167fdfd 100644
--- a/arch/sparc/kernel/starfire.c
+++ b/arch/sparc/kernel/starfire.c
@@ -28,11 +28,6 @@ void check_if_starfire(void)
this_is_starfire = 1;
}
-int starfire_hard_smp_processor_id(void)
-{
- return upa_readl(0x1fff40000d0UL);
-}
-
/*
* Each Starfire board has 32 registers which perform translation
* and delivery of traditional interrupt packets into the extended
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index c85403d..30e7ddb 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -333,7 +333,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
long err;
/* No need for backward compatibility. We can start fresh... */
- if (call <= SEMCTL) {
+ if (call <= SEMTIMEDOP) {
switch (call) {
case SEMOP:
err = sys_semtimedop(first, ptr,
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 2f80d23..18147a5 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -181,17 +181,13 @@ static struct clocksource timer_cs = {
.rating = 100,
.read = timer_cs_read,
.mask = CLOCKSOURCE_MASK(64),
- .shift = 2,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static __init int setup_timer_cs(void)
{
timer_cs_enabled = 1;
- timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
- timer_cs.shift);
-
- return clocksource_register(&timer_cs);
+ return clocksource_register_hz(&timer_cs, sparc_config.clock_rate);
}
#ifdef CONFIG_SMP
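
clocksource_register_hz() folds the mult/shift computation into the core: instead of the driver picking a shift and deriving mult with clocksource_hz2mult(), the core chooses an optimal pair for the given rate. The before/after shape, as a sketch:

	/* before: hand-rolled conversion factors */
	cs->shift = 2;
	cs->mult  = clocksource_hz2mult(rate_hz, cs->shift);
	ret = clocksource_register(cs);

	/* after: core derives mult/shift from the rate */
	ret = clocksource_register_hz(cs, rate_hz);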
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index a27651e..0e69974 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2427,6 +2427,8 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
}
user_instruction_dump ((unsigned int __user *) regs->tpc);
}
+ if (panic_on_oops)
+ panic("Fatal exception");
if (regs->tstate & TSTATE_PRIV)
do_exit(SIGKILL);
do_exit(SIGSEGV);
@@ -2564,27 +2566,6 @@ void do_cee(struct pt_regs *regs)
die_if_kernel("TL0: Cache Error Exception", regs);
}
-void do_cee_tl1(struct pt_regs *regs)
-{
- exception_enter();
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: Cache Error Exception", regs);
-}
-
-void do_dae_tl1(struct pt_regs *regs)
-{
- exception_enter();
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: Data Access Exception", regs);
-}
-
-void do_iae_tl1(struct pt_regs *regs)
-{
- exception_enter();
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: Instruction Access Exception", regs);
-}
-
void do_div0_tl1(struct pt_regs *regs)
{
exception_enter();
@@ -2592,13 +2573,6 @@ void do_div0_tl1(struct pt_regs *regs)
die_if_kernel("TL1: DIV0 Exception", regs);
}
-void do_fpdis_tl1(struct pt_regs *regs)
-{
- exception_enter();
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: FPU Disabled", regs);
-}
-
void do_fpieee_tl1(struct pt_regs *regs)
{
exception_enter();
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index b7f6334..857ad4f 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -8,9 +8,11 @@
.text
ENTRY(memmove) /* o0=dst o1=src o2=len */
- mov %o0, %g1
+ brz,pn %o2, 99f
+ mov %o0, %g1
+
cmp %o0, %o1
- bleu,pt %xcc, memcpy
+ bleu,pt %xcc, 2f
add %o1, %o2, %g7
cmp %g7, %o0
bleu,pt %xcc, memcpy
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
stb %g7, [%o0]
bne,pt %icc, 1b
sub %o0, 1, %o0
-
+99:
retl
mov %g1, %o0
+
+ /* We can't just call memcpy for these memmove cases. On some
+ * chips the memcpy uses cache initializing stores and when dst
+ * and src are close enough, those can clobber the source data
+ * before we've loaded it in.
+ */
+2: or %o0, %o1, %g7
+ or %o2, %g7, %g7
+ andcc %g7, 0x7, %g0
+ bne,pn %xcc, 4f
+ nop
+
+3: ldx [%o1], %g7
+ add %o1, 8, %o1
+ subcc %o2, 8, %o2
+ add %o0, 8, %o0
+ bne,pt %icc, 3b
+ stx %g7, [%o0 - 0x8]
+ ba,a,pt %xcc, 99b
+
+4: ldub [%o1], %g7
+ add %o1, 1, %o1
+ subcc %o2, 1, %o2
+ add %o0, 1, %o0
+ bne,pt %icc, 4b
+ stb %g7, [%o0 - 0x1]
+ ba,a,pt %xcc, 99b
ENDPROC(memmove)
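
The hazard named in the comment is subtler than textbook overlap: a forward copy with dst below src is normally safe, but a memcpy built on block-initializing stores can clear a whole cache line before the loads from that line complete, so even the "safe" direction can lose source bytes. The general contract memmove must uphold is easy to see from userspace; a minimal standalone demonstration:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[16] = "abcdef";

		/* dst overlaps src from above: a naive forward copy would
		 * read bytes it has already overwritten; memmove must not. */
		memmove(buf + 2, buf, 6);
		printf("%s\n", buf);	/* prints "ababcdef" */
		return 0;
	}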
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 3ea267c..4ca0d6b 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2820,7 +2820,7 @@ static int __init report_memory(void)
return 0;
}
-device_initcall(report_memory);
+arch_initcall(report_memory);
#ifdef CONFIG_SMP
#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 7cca418..0142d57 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -147,6 +147,11 @@ config ARCH_DEFCONFIG
default "arch/tile/configs/tilepro_defconfig" if !TILEGX
default "arch/tile/configs/tilegx_defconfig" if TILEGX
+config PGTABLE_LEVELS
+ int
+ default 3 if 64BIT
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 325df47..9475a74 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -339,6 +339,8 @@ int __init pcibios_init(void)
struct pci_bus *next_bus;
struct pci_dev *dev;
+ pci_bus_add_devices(root_bus);
+
list_for_each_entry(dev, &root_bus->devices, bus_list) {
/*
* Find the PCI host controller, ie. the 1st
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 2c95f37..b1df847 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -1030,6 +1030,8 @@ int __init pcibios_init(void)
alloc_mem_map_failed:
break;
}
+
+ pci_bus_add_devices(root_bus);
}
return 0;
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index d412b08..00178ec 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -257,34 +257,34 @@ void update_vsyscall_tz(void)
void update_vsyscall(struct timekeeper *tk)
{
- if (tk->tkr.clock != &cycle_counter_cs)
+ if (tk->tkr_mono.clock != &cycle_counter_cs)
return;
write_seqcount_begin(&vdso_data->tb_seq);
- vdso_data->cycle_last = tk->tkr.cycle_last;
- vdso_data->mask = tk->tkr.mask;
- vdso_data->mult = tk->tkr.mult;
- vdso_data->shift = tk->tkr.shift;
+ vdso_data->cycle_last = tk->tkr_mono.cycle_last;
+ vdso_data->mask = tk->tkr_mono.mask;
+ vdso_data->mult = tk->tkr_mono.mult;
+ vdso_data->shift = tk->tkr_mono.shift;
vdso_data->wall_time_sec = tk->xtime_sec;
- vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
+ vdso_data->wall_time_snsec = tk->tkr_mono.xtime_nsec;
vdso_data->monotonic_time_sec = tk->xtime_sec
+ tk->wall_to_monotonic.tv_sec;
- vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
+ vdso_data->monotonic_time_snsec = tk->tkr_mono.xtime_nsec
+ ((u64)tk->wall_to_monotonic.tv_nsec
- << tk->tkr.shift);
+ << tk->tkr_mono.shift);
while (vdso_data->monotonic_time_snsec >=
- (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+ (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
vdso_data->monotonic_time_snsec -=
- ((u64)NSEC_PER_SEC) << tk->tkr.shift;
+ ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
vdso_data->monotonic_time_sec++;
}
vdso_data->wall_time_coarse_sec = tk->xtime_sec;
- vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
- tk->tkr.shift);
+ vdso_data->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift);
vdso_data->monotonic_time_coarse_sec =
vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um
index a7520c9..5dbfe3d 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig.um
@@ -155,3 +155,8 @@ config MMAPPER
config NO_DMA
def_bool y
+
+config PGTABLE_LEVELS
+ int
+ default 3 if 3_LEVEL_PGTABLES
+ default 2
diff --git a/arch/unicore32/kernel/pci.c b/arch/unicore32/kernel/pci.c
index 374a055..d45fa5f 100644
--- a/arch/unicore32/kernel/pci.c
+++ b/arch/unicore32/kernel/pci.c
@@ -266,17 +266,10 @@ static int __init pci_common_init(void)
pci_fixup_irqs(pci_common_swizzle, pci_puv3_map_irq);
if (!pci_has_flag(PCI_PROBE_ONLY)) {
- /*
- * Size the bridge windows.
- */
pci_bus_size_bridges(puv3_bus);
-
- /*
- * Assign resources.
- */
pci_bus_assign_resources(puv3_bus);
}
-
+ pci_bus_add_devices(puv3_bus);
return 0;
}
subsys_initcall(pci_common_init);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b7d31ca..d43e7e1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -87,7 +87,7 @@ config X86
select HAVE_ARCH_KMEMCHECK
select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
select HAVE_USER_RETURN_NOTIFIER
- select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ select ARCH_HAS_ELF_RANDOMIZE
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select SPARSE_IRQ
@@ -99,6 +99,7 @@ config X86
select IRQ_FORCED_THREADING
select HAVE_BPF_JIT if X86_64
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ARCH_HUGE_VMAP if X86_64 || (X86_32 && X86_PAE)
select ARCH_HAS_SG_CHAIN
select CLKEVT_I8253
select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -235,12 +236,10 @@ config ARCH_WANT_GENERAL_HUGETLB
def_bool y
config ZONE_DMA32
- bool
- default X86_64
+ def_bool y if X86_64
config AUDIT_ARCH
- bool
- default X86_64
+ def_bool y if X86_64
config ARCH_SUPPORTS_OPTIMIZED_INLINING
def_bool y
@@ -279,6 +278,12 @@ config ARCH_SUPPORTS_UPROBES
config FIX_EARLYCON_MEM
def_bool y
+config PGTABLE_LEVELS
+ int
+ default 4 if X86_64
+ default 3 if X86_PAE
+ default 2
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -716,17 +721,6 @@ endif #HYPERVISOR_GUEST
config NO_BOOTMEM
def_bool y
-config MEMTEST
- bool "Memtest"
- ---help---
- This option adds a kernel parameter 'memtest', which allows memtest
- to be set.
- memtest=0, mean disabled; -- default
- memtest=1, mean do 1 test pattern;
- ...
- memtest=4, mean do 4 test patterns.
- If you are unsure how to answer this question, answer N.
-
source "arch/x86/Kconfig.cpu"
config HPET_TIMER
@@ -891,7 +885,8 @@ config UP_LATE_INIT
depends on !SMP && X86_LOCAL_APIC
config X86_UP_APIC
- bool "Local APIC support on uniprocessors"
+ bool "Local APIC support on uniprocessors" if !PCI_MSI
+ default PCI_MSI
depends on X86_32 && !SMP && !X86_32_NON_STANDARD
---help---
A local APIC (Advanced Programmable Interrupt Controller) is an
@@ -903,10 +898,6 @@ config X86_UP_APIC
performance counters), and the NMI watchdog which detects hard
lockups.
-config X86_UP_APIC_MSI
- def_bool y
- select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI
-
config X86_UP_IOAPIC
bool "IO-APIC support on uniprocessors"
depends on X86_UP_APIC
@@ -925,8 +916,8 @@ config X86_LOCAL_APIC
select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
config X86_IO_APIC
- def_bool X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
- depends on X86_LOCAL_APIC
+ def_bool y
+ depends on X86_LOCAL_APIC || X86_UP_IOAPIC
select IRQ_DOMAIN
config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
@@ -1145,10 +1136,10 @@ config MICROCODE_OLD_INTERFACE
depends on MICROCODE
config MICROCODE_INTEL_EARLY
- def_bool n
+ bool
config MICROCODE_AMD_EARLY
- def_bool n
+ bool
config MICROCODE_EARLY
bool "Early load microcode"
@@ -1300,14 +1291,14 @@ config ARCH_DMA_ADDR_T_64BIT
def_bool y
depends on X86_64 || HIGHMEM64G
-config DIRECT_GBPAGES
- bool "Enable 1GB pages for kernel pagetables" if EXPERT
- default y
- depends on X86_64
+config X86_DIRECT_GBPAGES
+ def_bool y
+ depends on X86_64 && !DEBUG_PAGEALLOC && !KMEMCHECK
---help---
- Allow the kernel linear mapping to use 1GB pages on CPUs that
- support it. This can improve the kernel's performance a tiny bit by
- reducing TLB pressure. If in doubt, say "Y".
+ Certain kernel features effectively disable kernel
+ linear 1 GB mappings (even if the CPU otherwise
+ supports them), so don't confuse the user by printing
+ that we have them enabled.
# Common NUMA Features
config NUMA
@@ -1747,14 +1738,11 @@ config KEXEC_VERIFY_SIG
depends on KEXEC_FILE
---help---
This option makes kernel signature verification mandatory for
- kexec_file_load() syscall. If kernel is signature can not be
- verified, kexec_file_load() will fail.
-
- This option enforces signature verification at generic level.
- One needs to enable signature verification for type of kernel
- image being loaded to make sure it works. For example, enable
- bzImage signature verification option to be able to load and
- verify signatures of bzImage. Otherwise kernel loading will fail.
+ the kexec_file_load() syscall.
+
+ In addition to that option, you need to enable signature
+ verification for the corresponding kernel image type being
+ loaded in order for this to work.
config KEXEC_BZIMAGE_VERIFY_SIG
bool "Enable bzImage signature verification support"
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 7083c16..d7b1f65 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -14,13 +14,6 @@
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
-struct kaslr_setup_data {
- __u64 next;
- __u32 type;
- __u32 len;
- __u8 data[1];
-} kaslr_setup_data;
-
#define I8254_PORT_CONTROL 0x43
#define I8254_PORT_COUNTER0 0x40
#define I8254_CMD_READBACK 0xC0
@@ -302,28 +295,7 @@ static unsigned long find_random_addr(unsigned long minimum,
return slots_fetch_random();
}
-static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled)
-{
- struct setup_data *data;
-
- kaslr_setup_data.type = SETUP_KASLR;
- kaslr_setup_data.len = 1;
- kaslr_setup_data.next = 0;
- kaslr_setup_data.data[0] = enabled;
-
- data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
- while (data && data->next)
- data = (struct setup_data *)(unsigned long)data->next;
-
- if (data)
- data->next = (unsigned long)&kaslr_setup_data;
- else
- params->hdr.setup_data = (unsigned long)&kaslr_setup_data;
-
-}
-
-unsigned char *choose_kernel_location(struct boot_params *params,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
unsigned char *input,
unsigned long input_size,
unsigned char *output,
@@ -335,17 +307,16 @@ unsigned char *choose_kernel_location(struct boot_params *params,
#ifdef CONFIG_HIBERNATION
if (!cmdline_find_option_bool("kaslr")) {
debug_putstr("KASLR disabled by default...\n");
- add_kaslr_setup_data(params, 0);
goto out;
}
#else
if (cmdline_find_option_bool("nokaslr")) {
debug_putstr("KASLR disabled by cmdline...\n");
- add_kaslr_setup_data(params, 0);
goto out;
}
#endif
- add_kaslr_setup_data(params, 1);
+
+ boot_params->hdr.loadflags |= KASLR_FLAG;
/* Record the various known unsafe memory ranges. */
mem_avoid_init((unsigned long)input, input_size,
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 1d7fbbc..8ef964d 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -29,6 +29,7 @@
#include <asm/page_types.h>
#include <asm/boot.h>
#include <asm/asm-offsets.h>
+#include <asm/bootparam.h>
__HEAD
ENTRY(startup_32)
@@ -102,7 +103,7 @@ preferred_addr:
* Test KEEP_SEGMENTS flag to see if the bootloader is asking
* us to not reload segments
*/
- testb $(1<<6), BP_loadflags(%esi)
+ testb $KEEP_SEGMENTS, BP_loadflags(%esi)
jnz 1f
cli
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 6b1766c..b0c0d16 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -31,6 +31,7 @@
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>
+#include <asm/bootparam.h>
__HEAD
.code32
@@ -46,7 +47,7 @@ ENTRY(startup_32)
* Test KEEP_SEGMENTS flag to see if the bootloader is asking
* us to not reload segments
*/
- testb $(1<<6), BP_loadflags(%esi)
+ testb $KEEP_SEGMENTS, BP_loadflags(%esi)
jnz 1f
cli
@@ -164,7 +165,7 @@ ENTRY(startup_32)
/* After gdt is loaded */
xorl %eax, %eax
lldt %ax
- movl $0x20, %eax
+ movl $__BOOT_TSS, %eax
ltr %ax
/*
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 5903089..a107b93 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -377,6 +377,9 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
real_mode = rmode;
+ /* Clear it for solely in-kernel use */
+ real_mode->hdr.loadflags &= ~KASLR_FLAG;
+
sanitize_boot_params(real_mode);
if (real_mode->screen_info.orig_video_mode == 7) {
@@ -401,8 +404,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
* the entire decompressed kernel plus relocation table, or the
* entire decompressed kernel plus .bss and .brk sections.
*/
- output = choose_kernel_location(real_mode, input_data, input_len,
- output,
+ output = choose_kernel_location(real_mode, input_data, input_len, output,
output_len > run_size ? output_len
: run_size);
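
With the setup_data node gone, KASLR status travels as a single bit in boot_params->hdr.loadflags: the decompressor first clears KASLR_FLAG on entry (bootloaders do not own the bit), then sets it in choose_kernel_location() once randomization is actually applied. A hypothetical consumer in the kernel proper — the helper name is invented; KASLR_FLAG itself is the real bootparam bit:

	#include <asm/bootparam.h>

	static bool kaslr_was_applied(const struct boot_params *bp)
	{
		return bp->hdr.loadflags & KASLR_FLAG;
	}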
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index ee3576b..89dd0d7 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -57,7 +57,7 @@ int cmdline_find_option_bool(const char *option);
#if CONFIG_RANDOMIZE_BASE
/* aslr.c */
-unsigned char *choose_kernel_location(struct boot_params *params,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
unsigned char *input,
unsigned long input_size,
unsigned char *output,
@@ -66,7 +66,7 @@ unsigned char *choose_kernel_location(struct boot_params *params,
bool has_cpuflag(int flag);
#else
static inline
-unsigned char *choose_kernel_location(struct boot_params *params,
+unsigned char *choose_kernel_location(struct boot_params *boot_params,
unsigned char *input,
unsigned long input_size,
unsigned char *output,
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 493f3fd..318b846 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -30,7 +30,7 @@ int strcmp(const char *str1, const char *str2)
int delta = 0;
while (*s1 || *s2) {
- delta = *s2 - *s1;
+ delta = *s1 - *s2;
if (delta)
return delta;
s1++;
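
The sign convention is the whole bug: strcmp() must return a negative value when str1 sorts before str2, and *s2 - *s1 inverted that, breaking any caller that tests the sign rather than just zero. A standalone check modeled on the fixed routine (which, like the boot version, compares plain char):

	#include <stdio.h>

	static int boot_strcmp(const char *s1, const char *s2)
	{
		while (*s1 || *s2) {
			int delta = *s1 - *s2;	/* sign follows s1 - s2 */
			if (delta)
				return delta;
			s1++;
			s2++;
		}
		return 0;
	}

	int main(void)
	{
		/* "a" < "b", so the result must be negative */
		printf("%d\n", boot_strcmp("a", "b") < 0);	/* prints 1 */
		return 0;
	}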
diff --git a/arch/x86/boot/video-mode.c b/arch/x86/boot/video-mode.c
index 748e8d0..aa8a96b 100644
--- a/arch/x86/boot/video-mode.c
+++ b/arch/x86/boot/video-mode.c
@@ -22,10 +22,8 @@
/*
* Common variables
*/
-int adapter; /* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */
-u16 video_segment;
+int adapter; /* 0=CGA/MDA/HGC, 1=EGA, 2=VGA+ */
int force_x, force_y; /* Don't query the BIOS for cols/rows */
-
int do_restore; /* Screen contents changed during mode flip */
int graphic_mode; /* Graphic mode with linear frame buffer */
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
index 43eda28..05111bb 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
@@ -17,6 +17,8 @@
#include "video.h"
#include "vesa.h"
+static u16 video_segment;
+
static void store_cursor_position(void)
{
struct biosregs ireg, oreg;
diff --git a/arch/x86/boot/video.h b/arch/x86/boot/video.h
index 0bb2549..b54e032 100644
--- a/arch/x86/boot/video.h
+++ b/arch/x86/boot/video.h
@@ -91,7 +91,6 @@ int mode_defined(u16 mode); /* video.c */
#define ADAPTER_VGA 2
extern int adapter;
-extern u16 video_segment;
extern int force_x, force_y; /* Don't query the BIOS for cols/rows */
extern int do_restore; /* Restore screen contents */
extern int graphic_mode; /* Graphics mode with linear frame buffer */
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 419819d..aaa1118 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -248,7 +248,7 @@ CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_TT_NEWSCHED=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_PRINTER=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 4c311dd..315b861 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -243,7 +243,7 @@ CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_TT_NEWSCHED=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_PRINTER=y
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 947c6bf..54f60ab 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1155,7 +1155,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
if (!src)
return -ENOMEM;
- assoc = (src + req->cryptlen + auth_tag_len);
+ assoc = (src + req->cryptlen);
scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
scatterwalk_map_and_copy(assoc, req->assoc, 0,
req->assoclen, 0);
@@ -1180,7 +1180,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
scatterwalk_done(&src_sg_walk, 0, 0);
scatterwalk_done(&assoc_sg_walk, 0, 0);
} else {
- scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
+ scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
kfree(src);
}
return retval;
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 26d49eb..225be06 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -178,7 +178,7 @@ continue_block:
## 2a) PROCESS FULL BLOCKS:
################################################################
full_block:
- movq $128,%rax
+ movl $128,%eax
lea 128*8*2(block_0), block_1
lea 128*8*3(block_0), block_2
add $128*8*1, block_0
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
index a039d21..a350c99 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -264,7 +264,7 @@ ENTRY(twofish_enc_blk)
movq R1, 8(%rsi)
popq R1
- movq $1,%rax
+ movl $1,%eax
ret
ENDPROC(twofish_enc_blk)
@@ -316,6 +316,6 @@ ENTRY(twofish_dec_blk)
movq R1, 8(%rsi)
popq R1
- movq $1,%rax
+ movl $1,%eax
ret
ENDPROC(twofish_dec_blk)
diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
index e785b42..bb635c6 100644
--- a/arch/x86/ia32/Makefile
+++ b/arch/x86/ia32/Makefile
@@ -3,7 +3,6 @@
#
obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o
-obj-$(CONFIG_IA32_EMULATION) += nosyscall.o syscall_ia32.o
obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index d0165c9..c81d35e6 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -161,8 +161,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
}
static int ia32_restore_sigcontext(struct pt_regs *regs,
- struct sigcontext_ia32 __user *sc,
- unsigned int *pax)
+ struct sigcontext_ia32 __user *sc)
{
unsigned int tmpflags, err = 0;
void __user *buf;
@@ -184,7 +183,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
RELOAD_SEG(es);
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
- COPY(dx); COPY(cx); COPY(ip);
+ COPY(dx); COPY(cx); COPY(ip); COPY(ax);
/* Don't touch extended registers */
COPY_SEG_CPL3(cs);
@@ -197,12 +196,12 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
get_user_ex(tmp, &sc->fpstate);
buf = compat_ptr(tmp);
-
- get_user_ex(*pax, &sc->ax);
} get_user_catch(err);
err |= restore_xstate_sig(buf, 1);
+ force_iret();
+
return err;
}
@@ -211,7 +210,6 @@ asmlinkage long sys32_sigreturn(void)
struct pt_regs *regs = current_pt_regs();
struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
sigset_t set;
- unsigned int ax;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@@ -224,9 +222,9 @@ asmlinkage long sys32_sigreturn(void)
set_current_blocked(&set);
- if (ia32_restore_sigcontext(regs, &frame->sc, &ax))
+ if (ia32_restore_sigcontext(regs, &frame->sc))
goto badframe;
- return ax;
+ return regs->ax;
badframe:
signal_fault(regs, frame, "32bit sigreturn");
@@ -238,7 +236,6 @@ asmlinkage long sys32_rt_sigreturn(void)
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe_ia32 __user *frame;
sigset_t set;
- unsigned int ax;
frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4);
@@ -249,13 +246,13 @@ asmlinkage long sys32_rt_sigreturn(void)
set_current_blocked(&set);
- if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
+ if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
- return ax;
+ return regs->ax;
badframe:
signal_fault(regs, frame, "32bit rt sigreturn");
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 156ebca..a821b1c 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -30,24 +30,13 @@
.section .entry.text, "ax"
- .macro IA32_ARG_FIXUP noebp=0
- movl %edi,%r8d
- .if \noebp
- .else
- movl %ebp,%r9d
- .endif
- xchg %ecx,%esi
- movl %ebx,%edi
- movl %edx,%edx /* zero extension */
- .endm
-
- /* clobbers %eax */
- .macro CLEAR_RREGS offset=0, _r9=rax
+ /* clobbers %rax */
+ .macro CLEAR_RREGS _r9=rax
xorl %eax,%eax
- movq %rax,\offset+R11(%rsp)
- movq %rax,\offset+R10(%rsp)
- movq %\_r9,\offset+R9(%rsp)
- movq %rax,\offset+R8(%rsp)
+ movq %rax,R11(%rsp)
+ movq %rax,R10(%rsp)
+ movq %\_r9,R9(%rsp)
+ movq %rax,R8(%rsp)
.endm
/*
@@ -60,14 +49,14 @@
* If it's -1 to make us punt the syscall, then (u32)-1 is still
* an appropriately invalid value.
*/
- .macro LOAD_ARGS32 offset, _r9=0
+ .macro LOAD_ARGS32 _r9=0
.if \_r9
- movl \offset+16(%rsp),%r9d
+ movl R9(%rsp),%r9d
.endif
- movl \offset+40(%rsp),%ecx
- movl \offset+48(%rsp),%edx
- movl \offset+56(%rsp),%esi
- movl \offset+64(%rsp),%edi
+ movl RCX(%rsp),%ecx
+ movl RDX(%rsp),%edx
+ movl RSI(%rsp),%esi
+ movl RDI(%rsp),%edi
movl %eax,%eax /* zero extension */
.endm
@@ -99,54 +88,69 @@ ENDPROC(native_irq_enable_sysexit)
/*
* 32bit SYSENTER instruction entry.
*
+ * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
+ * IF and VM in rflags are cleared (IOW: interrupts are off).
+ * SYSENTER does not save anything on the stack,
+ * and does not save old rip (!!!) and rflags.
+ *
* Arguments:
- * %eax System call number.
- * %ebx Arg1
- * %ecx Arg2
- * %edx Arg3
- * %esi Arg4
- * %edi Arg5
- * %ebp user stack
- * 0(%ebp) Arg6
- *
- * Interrupts off.
- *
+ * eax system call number
+ * ebx arg1
+ * ecx arg2
+ * edx arg3
+ * esi arg4
+ * edi arg5
+ * ebp user stack
+ * 0(%ebp) arg6
+ *
* This is purely a fast path. For anything complicated we use the int 0x80
- * path below. Set up a complete hardware stack frame to share code
+ * path below. We set up a complete hardware stack frame to share code
* with the int 0x80 path.
- */
+ */
ENTRY(ia32_sysenter_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,0
CFI_REGISTER rsp,rbp
- SWAPGS_UNSAFE_STACK
- movq PER_CPU_VAR(kernel_stack), %rsp
- addq $(KERNEL_STACK_OFFSET),%rsp
+
/*
- * No need to follow this irqs on/off section: the syscall
- * disabled irqs, here we enable it straight after entry:
+ * Interrupts are off on entry.
+ * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+ * it is too small to ever cause noticeable irq latency.
*/
+ SWAPGS_UNSAFE_STACK
+ movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
ENABLE_INTERRUPTS(CLBR_NONE)
- movl %ebp,%ebp /* zero extension */
- pushq_cfi $__USER32_DS
- /*CFI_REL_OFFSET ss,0*/
- pushq_cfi %rbp
- CFI_REL_OFFSET rsp,0
- pushfq_cfi
- /*CFI_REL_OFFSET rflags,0*/
- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
- CFI_REGISTER rip,r10
- pushq_cfi $__USER32_CS
- /*CFI_REL_OFFSET cs,0*/
+
+ /* Zero-extending 32-bit regs, do not remove */
+ movl %ebp, %ebp
movl %eax, %eax
- pushq_cfi %r10
- CFI_REL_OFFSET rip,0
- pushq_cfi %rax
+
+ movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
+ CFI_REGISTER rip,r10
+
+ /* Construct struct pt_regs on stack */
+ pushq_cfi $__USER32_DS /* pt_regs->ss */
+ pushq_cfi %rbp /* pt_regs->sp */
+ CFI_REL_OFFSET rsp,0
+ pushfq_cfi /* pt_regs->flags */
+ pushq_cfi $__USER32_CS /* pt_regs->cs */
+ pushq_cfi %r10 /* pt_regs->ip = thread_info->sysenter_return */
+ CFI_REL_OFFSET rip,0
+ pushq_cfi_reg rax /* pt_regs->orig_ax */
+ pushq_cfi_reg rdi /* pt_regs->di */
+ pushq_cfi_reg rsi /* pt_regs->si */
+ pushq_cfi_reg rdx /* pt_regs->dx */
+ pushq_cfi_reg rcx /* pt_regs->cx */
+ pushq_cfi_reg rax /* pt_regs->ax */
cld
- SAVE_ARGS 0,1,0
- /* no need to do an access_ok check here because rbp has been
- 32bit zero extended */
+ sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
+ CFI_ADJUST_CFA_OFFSET 10*8
+
+ /*
+ * no need to do an access_ok check here because rbp has been
+ * 32bit zero extended
+ */
ASM_STAC
1: movl (%rbp),%ebp
_ASM_EXTABLE(1b,ia32_badarg)
@@ -157,42 +161,80 @@ ENTRY(ia32_sysenter_target)
* ourselves. To save a few cycles, we can check whether
* NT was set instead of doing an unconditional popfq.
*/
- testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp)
+ testl $X86_EFLAGS_NT,EFLAGS(%rsp)
jnz sysenter_fix_flags
sysenter_flags_fixed:
- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
sysenter_do_call:
- IA32_ARG_FIXUP
+ /* 32bit syscall -> 64bit C ABI argument conversion */
+ movl %edi,%r8d /* arg5 */
+ movl %ebp,%r9d /* arg6 */
+ xchg %ecx,%esi /* rsi:arg2, rcx:arg4 */
+ movl %ebx,%edi /* arg1 */
+ movl %edx,%edx /* arg3 (zero extension) */
sysenter_dispatch:
call *ia32_sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
+ movq %rax,RAX(%rsp)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz sysexit_audit
sysexit_from_sys_call:
- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
- /* clear IF, that popfq doesn't enable interrupts early */
- andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
- movl RIP-ARGOFFSET(%rsp),%edx /* User %eip */
- CFI_REGISTER rip,rdx
- RESTORE_ARGS 0,24,0,0,0,0
+ /*
+ * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
+ * NMI between STI and SYSEXIT has poorly specified behavior,
+ * and an NMI followed by an IRQ with usergs is fatal. So
+ * we just pretend we're using SYSEXIT but we really use
+ * SYSRETL instead.
+ *
+ * This code path is still called 'sysexit' because it pairs
+ * with 'sysenter' and it uses the SYSENTER calling convention.
+ */
+ andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ movl RIP(%rsp),%ecx /* User %eip */
+ CFI_REGISTER rip,rcx
+ RESTORE_RSI_RDI
+ xorl %edx,%edx /* avoid info leaks */
xorq %r8,%r8
xorq %r9,%r9
xorq %r10,%r10
- xorq %r11,%r11
- popfq_cfi
+ movl EFLAGS(%rsp),%r11d /* User eflags */
/*CFI_RESTORE rflags*/
- popq_cfi %rcx /* User %esp */
- CFI_REGISTER rsp,rcx
TRACE_IRQS_ON
- ENABLE_INTERRUPTS_SYSEXIT32
+
+ /*
+ * SYSRETL works even on Intel CPUs. Use it in preference to SYSEXIT,
+ * since it avoids a dicey window with interrupts enabled.
+ */
+ movl RSP(%rsp),%esp
+
+ /*
+ * USERGS_SYSRET32 does:
+ * gsbase = user's gs base
+ * eip = ecx
+ * rflags = r11
+ * cs = __USER32_CS
+ * ss = __USER_DS
+ *
+ * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
+ *
+ * pop %ebp
+ * pop %edx
+ * pop %ecx
+ *
+ * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
+ * avoid info leaks. R11 ends up with VDSO32_SYSENTER_RETURN's
+ * address (already known to user code), and R12-R15 are
+ * callee-saved and therefore don't contain any interesting
+ * kernel data.
+ */
+ USERGS_SYSRET32
CFI_RESTORE_STATE
@@ -205,18 +247,18 @@ sysexit_from_sys_call:
movl %ebx,%esi /* 2nd arg: 1st syscall arg */
movl %eax,%edi /* 1st arg: syscall number */
call __audit_syscall_entry
- movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
+ movl RAX(%rsp),%eax /* reload syscall number */
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
movl %ebx,%edi /* reload 1st syscall arg */
- movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */
- movl RDX-ARGOFFSET(%rsp),%edx /* reload 3rd syscall arg */
- movl RSI-ARGOFFSET(%rsp),%ecx /* reload 4th syscall arg */
- movl RDI-ARGOFFSET(%rsp),%r8d /* reload 5th syscall arg */
+ movl RCX(%rsp),%esi /* reload 2nd syscall arg */
+ movl RDX(%rsp),%edx /* reload 3rd syscall arg */
+ movl RSI(%rsp),%ecx /* reload 4th syscall arg */
+ movl RDI(%rsp),%r8d /* reload 5th syscall arg */
.endm
.macro auditsys_exit exit
- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
@@ -227,13 +269,13 @@ sysexit_from_sys_call:
1: setbe %al /* 1 if error, 0 if not */
movzbl %al,%edi /* zero-extend that into %edi */
call __audit_syscall_exit
- movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
+ movq RAX(%rsp),%rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jz \exit
- CLEAR_RREGS -ARGOFFSET
+ CLEAR_RREGS
jmp int_with_check
.endm
@@ -253,16 +295,16 @@ sysenter_fix_flags:
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jz sysenter_auditsys
#endif
- SAVE_REST
+ SAVE_EXTRA_REGS
CLEAR_RREGS
movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
- LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
- RESTORE_REST
+ LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
+ RESTORE_EXTRA_REGS
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
jmp sysenter_do_call
@@ -272,94 +314,128 @@ ENDPROC(ia32_sysenter_target)
/*
* 32bit SYSCALL instruction entry.
*
+ * 32bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
+ * then loads new ss, cs, and rip from previously programmed MSRs.
+ * rflags gets masked by a value from another MSR (so CLD and CLAC
+ * are not needed). SYSCALL does not save anything on the stack
+ * and does not change rsp.
+ *
+ * Note: rflags saving+masking-with-MSR happens only in Long mode
+ * (in legacy 32bit mode, IF, RF and VM bits are cleared and that's it).
+ * Don't get confused: rflags saving+masking depends on Long Mode Active bit
+ * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
+ * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
+ *
* Arguments:
- * %eax System call number.
- * %ebx Arg1
- * %ecx return EIP
- * %edx Arg3
- * %esi Arg4
- * %edi Arg5
- * %ebp Arg2 [note: not saved in the stack frame, should not be touched]
- * %esp user stack
- * 0(%esp) Arg6
- *
- * Interrupts off.
- *
+ * eax system call number
+ * ecx return address
+ * ebx arg1
+ * ebp arg2 (note: not saved in the stack frame, should not be touched)
+ * edx arg3
+ * esi arg4
+ * edi arg5
+ * esp user stack
+ * 0(%esp) arg6
+ *
* This is purely a fast path. For anything complicated we use the int 0x80
- * path below. Set up a complete hardware stack frame to share code
- * with the int 0x80 path.
- */
+ * path below. We set up a complete hardware stack frame to share code
+ * with the int 0x80 path.
+ */
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
+ CFI_DEF_CFA rsp,0
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
+
+ /*
+ * Interrupts are off on entry.
+ * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+ * it is too small to ever cause noticeable irq latency.
+ */
SWAPGS_UNSAFE_STACK
movl %esp,%r8d
CFI_REGISTER rsp,r8
movq PER_CPU_VAR(kernel_stack),%rsp
- /*
- * No need to follow this irqs on/off section: the syscall
- * disabled irqs and here we enable it straight after entry:
- */
ENABLE_INTERRUPTS(CLBR_NONE)
- SAVE_ARGS 8,0,0
- movl %eax,%eax /* zero extension */
- movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
- movq %rcx,RIP-ARGOFFSET(%rsp)
- CFI_REL_OFFSET rip,RIP-ARGOFFSET
- movq %rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
+
+ /* Zero-extending 32-bit regs, do not remove */
+ movl %eax,%eax
+
+ /* Construct struct pt_regs on stack */
+ pushq_cfi $__USER32_DS /* pt_regs->ss */
+ pushq_cfi %r8 /* pt_regs->sp */
+ CFI_REL_OFFSET rsp,0
+ pushq_cfi %r11 /* pt_regs->flags */
+ pushq_cfi $__USER32_CS /* pt_regs->cs */
+ pushq_cfi %rcx /* pt_regs->ip */
+ CFI_REL_OFFSET rip,0
+ pushq_cfi_reg rax /* pt_regs->orig_ax */
+ pushq_cfi_reg rdi /* pt_regs->di */
+ pushq_cfi_reg rsi /* pt_regs->si */
+ pushq_cfi_reg rdx /* pt_regs->dx */
+ pushq_cfi_reg rbp /* pt_regs->cx */
movl %ebp,%ecx
- movq $__USER32_CS,CS-ARGOFFSET(%rsp)
- movq $__USER32_DS,SS-ARGOFFSET(%rsp)
- movq %r11,EFLAGS-ARGOFFSET(%rsp)
- /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
- movq %r8,RSP-ARGOFFSET(%rsp)
- CFI_REL_OFFSET rsp,RSP-ARGOFFSET
- /* no need to do an access_ok check here because r8 has been
- 32bit zero extended */
- /* hardware stack frame is complete now */
+ pushq_cfi_reg rax /* pt_regs->ax */
+ sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
+ CFI_ADJUST_CFA_OFFSET 10*8
+
+ /*
+ * no need to do an access_ok check here because r8 has been
+ * 32bit zero extended
+ */
ASM_STAC
1: movl (%r8),%r9d
_ASM_EXTABLE(1b,ia32_badarg)
ASM_CLAC
- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
CFI_REMEMBER_STATE
jnz cstar_tracesys
cmpq $IA32_NR_syscalls-1,%rax
ja ia32_badsys
cstar_do_call:
- IA32_ARG_FIXUP 1
+ /* 32bit syscall -> 64bit C ABI argument conversion */
+ movl %edi,%r8d /* arg5 */
+ /* r9 already loaded */ /* arg6 */
+ xchg %ecx,%esi /* rsi:arg2, rcx:arg4 */
+ movl %ebx,%edi /* arg1 */
+ movl %edx,%edx /* arg3 (zero extension) */
cstar_dispatch:
call *ia32_sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
+ movq %rax,RAX(%rsp)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz sysretl_audit
sysretl_from_sys_call:
- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
- movl RIP-ARGOFFSET(%rsp),%ecx
+ andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ RESTORE_RSI_RDI_RDX
+ movl RIP(%rsp),%ecx
CFI_REGISTER rip,rcx
- movl EFLAGS-ARGOFFSET(%rsp),%r11d
+ movl EFLAGS(%rsp),%r11d
/*CFI_REGISTER rflags,r11*/
xorq %r10,%r10
xorq %r9,%r9
xorq %r8,%r8
TRACE_IRQS_ON
- movl RSP-ARGOFFSET(%rsp),%esp
+ movl RSP(%rsp),%esp
CFI_RESTORE rsp
+ /*
+ * 64bit->32bit SYSRET restores eip from ecx,
+ * eflags from r11 (but RF and VM bits are forced to 0),
+ * cs and ss are loaded from MSRs.
+ * (Note: 32bit->32bit SYSRET is different; since r11
+ * does not exist, it merely sets eflags.IF=1.)
+ */
USERGS_SYSRET32
-
+
#ifdef CONFIG_AUDITSYSCALL
cstar_auditsys:
CFI_RESTORE_STATE
- movl %r9d,R9-ARGOFFSET(%rsp) /* register to be clobbered by call */
+ movl %r9d,R9(%rsp) /* register to be clobbered by call */
auditsys_entry_common
- movl R9-ARGOFFSET(%rsp),%r9d /* reload 6th syscall arg */
+ movl R9(%rsp),%r9d /* reload 6th syscall arg */
jmp cstar_dispatch
sysretl_audit:
@@ -368,17 +444,17 @@ sysretl_audit:
cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jz cstar_auditsys
#endif
xchgl %r9d,%ebp
- SAVE_REST
- CLEAR_RREGS 0, r9
+ SAVE_EXTRA_REGS
+ CLEAR_RREGS r9
movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
- LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
- RESTORE_REST
+ LOAD_ARGS32 1 /* reload args from stack in case ptrace changed it */
+ RESTORE_EXTRA_REGS
xchgl %ebp,%r9d
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
@@ -391,78 +467,94 @@ ia32_badarg:
jmp ia32_sysret
CFI_ENDPROC
-/*
- * Emulated IA32 system calls via int 0x80.
+/*
+ * Emulated IA32 system calls via int 0x80.
*
- * Arguments:
- * %eax System call number.
- * %ebx Arg1
- * %ecx Arg2
- * %edx Arg3
- * %esi Arg4
- * %edi Arg5
- * %ebp Arg6 [note: not saved in the stack frame, should not be touched]
+ * Arguments:
+ * eax system call number
+ * ebx arg1
+ * ecx arg2
+ * edx arg3
+ * esi arg4
+ * edi arg5
+ * ebp arg6 (note: not saved in the stack frame, should not be touched)
*
* Notes:
- * Uses the same stack frame as the x86-64 version.
- * All registers except %eax must be saved (but ptrace may violate that)
+ * Uses the same stack frame as the x86-64 version.
+ * All registers except eax must be saved (but ptrace may violate that).
* Arguments are zero extended. For system calls that want sign extension and
* take long arguments a wrapper is needed. Most calls can just be called
* directly.
- * Assumes it is only called from user space and entered with interrupts off.
- */
+ * Assumes it is only called from user space and entered with interrupts off.
+ */
ENTRY(ia32_syscall)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
- CFI_DEF_CFA rsp,SS+8-RIP
- /*CFI_REL_OFFSET ss,SS-RIP*/
- CFI_REL_OFFSET rsp,RSP-RIP
- /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/
- /*CFI_REL_OFFSET cs,CS-RIP*/
- CFI_REL_OFFSET rip,RIP-RIP
- PARAVIRT_ADJUST_EXCEPTION_FRAME
- SWAPGS
+ CFI_DEF_CFA rsp,5*8
+ /*CFI_REL_OFFSET ss,4*8 */
+ CFI_REL_OFFSET rsp,3*8
+ /*CFI_REL_OFFSET rflags,2*8 */
+ /*CFI_REL_OFFSET cs,1*8 */
+ CFI_REL_OFFSET rip,0*8
+
/*
- * No need to follow this irqs on/off section: the syscall
- * disabled irqs and here we enable it straight after entry:
+ * Interrupts are off on entry.
+ * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON;
+ * it is too small to ever cause noticeable irq latency.
*/
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
+ SWAPGS
ENABLE_INTERRUPTS(CLBR_NONE)
- movl %eax,%eax
- pushq_cfi %rax
+
+ /* Zero-extending 32-bit regs, do not remove */
+ movl %eax,%eax
+
+ /* Construct struct pt_regs on stack (iret frame is already on stack) */
+ pushq_cfi_reg rax /* pt_regs->orig_ax */
+ pushq_cfi_reg rdi /* pt_regs->di */
+ pushq_cfi_reg rsi /* pt_regs->si */
+ pushq_cfi_reg rdx /* pt_regs->dx */
+ pushq_cfi_reg rcx /* pt_regs->cx */
+ pushq_cfi_reg rax /* pt_regs->ax */
cld
- /* note the registers are not zero extended to the sf.
- this could be a problem. */
- SAVE_ARGS 0,1,0
- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
+ CFI_ADJUST_CFA_OFFSET 10*8
+
+ orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz ia32_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
ia32_do_call:
- IA32_ARG_FIXUP
+ /* 32bit syscall -> 64bit C ABI argument conversion */
+ movl %edi,%r8d /* arg5 */
+ movl %ebp,%r9d /* arg6 */
+ xchg %ecx,%esi /* rsi:arg2, rcx:arg4 */
+ movl %ebx,%edi /* arg1 */
+ movl %edx,%edx /* arg3 (zero extension) */
call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
ia32_sysret:
- movq %rax,RAX-ARGOFFSET(%rsp)
+ movq %rax,RAX(%rsp)
ia32_ret_from_sys_call:
- CLEAR_RREGS -ARGOFFSET
- jmp int_ret_from_sys_call
+ CLEAR_RREGS
+ jmp int_ret_from_sys_call
-ia32_tracesys:
- SAVE_REST
+ia32_tracesys:
+ SAVE_EXTRA_REGS
CLEAR_RREGS
movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
- LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
- RESTORE_REST
+ LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
+ RESTORE_EXTRA_REGS
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
jmp ia32_do_call
END(ia32_syscall)
ia32_badsys:
- movq $0,ORIG_RAX-ARGOFFSET(%rsp)
+ movq $0,ORIG_RAX(%rsp)
movq $-ENOSYS,%rax
jmp ia32_sysret
@@ -479,8 +571,6 @@ GLOBAL(\label)
PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
PTREGSCALL stub32_sigreturn, sys32_sigreturn
- PTREGSCALL stub32_execve, compat_sys_execve
- PTREGSCALL stub32_execveat, compat_sys_execveat
PTREGSCALL stub32_fork, sys_fork
PTREGSCALL stub32_vfork, sys_vfork
@@ -492,24 +582,23 @@ GLOBAL(stub32_clone)
ALIGN
ia32_ptregs_common:
- popq %r11
CFI_ENDPROC
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
- CFI_DEF_CFA rsp,SS+8-ARGOFFSET
- CFI_REL_OFFSET rax,RAX-ARGOFFSET
- CFI_REL_OFFSET rcx,RCX-ARGOFFSET
- CFI_REL_OFFSET rdx,RDX-ARGOFFSET
- CFI_REL_OFFSET rsi,RSI-ARGOFFSET
- CFI_REL_OFFSET rdi,RDI-ARGOFFSET
- CFI_REL_OFFSET rip,RIP-ARGOFFSET
-/* CFI_REL_OFFSET cs,CS-ARGOFFSET*/
-/* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
- CFI_REL_OFFSET rsp,RSP-ARGOFFSET
-/* CFI_REL_OFFSET ss,SS-ARGOFFSET*/
- SAVE_REST
+ CFI_DEF_CFA rsp,SIZEOF_PTREGS
+ CFI_REL_OFFSET rax,RAX
+ CFI_REL_OFFSET rcx,RCX
+ CFI_REL_OFFSET rdx,RDX
+ CFI_REL_OFFSET rsi,RSI
+ CFI_REL_OFFSET rdi,RDI
+ CFI_REL_OFFSET rip,RIP
+/* CFI_REL_OFFSET cs,CS*/
+/* CFI_REL_OFFSET rflags,EFLAGS*/
+ CFI_REL_OFFSET rsp,RSP
+/* CFI_REL_OFFSET ss,SS*/
+ SAVE_EXTRA_REGS 8
call *%rax
- RESTORE_REST
- jmp ia32_sysret /* misbalances the return cache */
+ RESTORE_EXTRA_REGS 8
+ ret
CFI_ENDPROC
END(ia32_ptregs_common)
diff --git a/arch/x86/ia32/nosyscall.c b/arch/x86/ia32/nosyscall.c
deleted file mode 100644
index 51ecd5b..0000000
--- a/arch/x86/ia32/nosyscall.c
+++ /dev/null
@@ -1,7 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/errno.h>
-
-long compat_ni_syscall(void)
-{
- return -ENOSYS;
-}
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 8e0ceec..719cd70 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -201,20 +201,6 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
advice);
}
-long sys32_vm86_warning(void)
-{
- struct task_struct *me = current;
- static char lastcomm[sizeof(me->comm)];
-
- if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
- compat_printk(KERN_INFO
- "%s: vm86 mode not supported on 64 bit kernel\n",
- me->comm);
- strncpy(lastcomm, me->comm, sizeof(lastcomm));
- }
- return -ENOSYS;
-}
-
asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
size_t count)
{
diff --git a/arch/x86/ia32/syscall_ia32.c b/arch/x86/ia32/syscall_ia32.c
deleted file mode 100644
index 4754ba0..0000000
--- a/arch/x86/ia32/syscall_ia32.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/* System call table for ia32 emulation. */
-
-#include <linux/linkage.h>
-#include <linux/sys.h>
-#include <linux/cache.h>
-#include <asm/asm-offsets.h>
-
-#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void compat(void) ;
-#include <asm/syscalls_32.h>
-#undef __SYSCALL_I386
-
-#define __SYSCALL_I386(nr, sym, compat) [nr] = compat,
-
-typedef void (*sys_call_ptr_t)(void);
-
-extern void compat_ni_syscall(void);
-
-const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_ia32_syscall_max] = &compat_ni_syscall,
-#include <asm/syscalls_32.h>
-};
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 372231c..bdf02ee 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -18,12 +18,63 @@
.endm
#endif
-.macro altinstruction_entry orig alt feature orig_len alt_len
+.macro altinstruction_entry orig alt feature orig_len alt_len pad_len
.long \orig - .
.long \alt - .
.word \feature
.byte \orig_len
.byte \alt_len
+ .byte \pad_len
+.endm
+
+.macro ALTERNATIVE oldinstr, newinstr, feature
+140:
+ \oldinstr
+141:
+ .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90
+142:
+
+ .pushsection .altinstructions,"a"
+ altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
+ .popsection
+
+ .pushsection .altinstr_replacement,"ax"
+143:
+ \newinstr
+144:
+ .popsection
+.endm
+
+#define old_len 141b-140b
+#define new_len1 144f-143f
+#define new_len2 145f-144f
+
+/*
+ * max without conditionals. Idea adapted from:
+ * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ */
+#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
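
alt_max_short() is the classic branchless integer max from the bithacks page;
a branchless form is needed because the pad lengths are computed by the
assembler, where conditionals are unavailable and gas relational operators
evaluate to 0/-1 rather than C's 0/1 (hence the extra negation above). A
minimal, self-contained C check of the trick, not part of the patch:

#include <assert.h>

/* Branchless max: (a ^ b) & -(a < b) equals (a ^ b) iff a < b, else 0. */
static long max_branchless(long a, long b)
{
	return a ^ ((a ^ b) & -(long)(a < b));
}

int main(void)
{
	assert(max_branchless(3, 7) == 7);
	assert(max_branchless(7, 3) == 7);
	assert(max_branchless(-2, -9) == -2);
	assert(max_branchless(5, 5) == 5);
	return 0;
}
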
+
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+140:
+ \oldinstr
+141:
+ .skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
+ (alt_max_short(new_len1, new_len2) - (old_len)),0x90
+142:
+
+ .pushsection .altinstructions,"a"
+ altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f,142b-141b
+ altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
+ .popsection
+
+ .pushsection .altinstr_replacement,"ax"
+143:
+ \newinstr1
+144:
+ \newinstr2
+145:
+ .popsection
.endm
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 473bdbe..ba32af0 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -48,8 +48,9 @@ struct alt_instr {
s32 repl_offset; /* offset to replacement instruction */
u16 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
- u8 replacementlen; /* length of new instruction, <= instrlen */
-};
+ u8 replacementlen; /* length of new instruction */
+ u8 padlen; /* length of build-time padding */
+} __packed;
extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
@@ -76,50 +77,69 @@ static inline int alternatives_text_reserved(void *start, void *end)
}
#endif /* CONFIG_SMP */
-#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n"
+#define b_replacement(num) "664"#num
+#define e_replacement(num) "665"#num
-#define b_replacement(number) "663"#number
-#define e_replacement(number) "664"#number
+#define alt_end_marker "663"
+#define alt_slen "662b-661b"
+#define alt_pad_len alt_end_marker"b-662b"
+#define alt_total_slen alt_end_marker"b-661b"
+#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f"
-#define alt_slen "662b-661b"
-#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"
+#define __OLDINSTR(oldinstr, num) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ ".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \
+ "((" alt_rlen(num) ")-(" alt_slen ")),0x90\n"
-#define ALTINSTR_ENTRY(feature, number) \
+#define OLDINSTR(oldinstr, num) \
+ __OLDINSTR(oldinstr, num) \
+ alt_end_marker ":\n"
+
+/*
+ * max without conditionals. Idea adapted from:
+ * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas works with s32s.
+ */
+#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
+
+/*
+ * Pad the second replacement alternative with additional NOPs if it is
+ * longer than the first replacement alternative.
+ */
+#define OLDINSTR_2(oldinstr, num1, num2) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ ".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \
+ "(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \
+ alt_end_marker ":\n"
+
+#define ALTINSTR_ENTRY(feature, num) \
" .long 661b - .\n" /* label */ \
- " .long " b_replacement(number)"f - .\n" /* new instruction */ \
+ " .long " b_replacement(num)"f - .\n" /* new instruction */ \
" .word " __stringify(feature) "\n" /* feature bit */ \
- " .byte " alt_slen "\n" /* source len */ \
- " .byte " alt_rlen(number) "\n" /* replacement len */
-
-#define DISCARD_ENTRY(number) /* rlen <= slen */ \
- " .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
+ " .byte " alt_total_slen "\n" /* source len */ \
+ " .byte " alt_rlen(num) "\n" /* replacement len */ \
+ " .byte " alt_pad_len "\n" /* pad len */
-#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \
- b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
+#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
+ b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
- OLDINSTR(oldinstr) \
+ OLDINSTR(oldinstr, 1) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \
- ".pushsection .discard,\"aw\",@progbits\n" \
- DISCARD_ENTRY(1) \
- ".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".popsection"
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
- OLDINSTR(oldinstr) \
+ OLDINSTR_2(oldinstr, 1, 2) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \
- ".pushsection .discard,\"aw\",@progbits\n" \
- DISCARD_ENTRY(1) \
- DISCARD_ENTRY(2) \
- ".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
@@ -146,6 +166,9 @@ static inline int alternatives_text_reserved(void *start, void *end)
#define alternative(oldinstr, newinstr, feature) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
+#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
+ asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")
+
/*
* Alternative inline assembly with input.
*
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index efc3b22..976b86a 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -91,7 +91,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
{
volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
- alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP,
+ alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
ASM_OUTPUT2("=r" (v), "=m" (*addr)),
ASM_OUTPUT2("0" (v), "m" (*addr)));
}
@@ -204,7 +204,6 @@ extern void clear_local_APIC(void);
extern void disconnect_bsp_APIC(int virt_wire_setup);
extern void disable_local_APIC(void);
extern void lapic_shutdown(void);
-extern int verify_local_APIC(void);
extern void sync_Arb_IDs(void);
extern void init_bsp_APIC(void);
extern void setup_local_APIC(void);
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 2ab1eb3..959e45b 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -95,13 +95,11 @@ do { \
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
- *
- * (Could use an alternative three way for this if there was one.)
*/
static __always_inline void rdtsc_barrier(void)
{
- alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
- alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+ alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
+ "lfence", X86_FEATURE_LFENCE_RDTSC);
}
#endif /* _ASM_X86_BARRIER_H */
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 1f1297b..1c8b50e 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -55,143 +55,157 @@ For 32-bit we have the following conventions - kernel is built with
* for assembly code:
*/
-#define R15 0
-#define R14 8
-#define R13 16
-#define R12 24
-#define RBP 32
-#define RBX 40
-
-/* arguments: interrupts/non tracing syscalls only save up to here: */
-#define R11 48
-#define R10 56
-#define R9 64
-#define R8 72
-#define RAX 80
-#define RCX 88
-#define RDX 96
-#define RSI 104
-#define RDI 112
-#define ORIG_RAX 120 /* + error_code */
-/* end of arguments */
-
-/* cpu exception frame or undefined in case of fast syscall: */
-#define RIP 128
-#define CS 136
-#define EFLAGS 144
-#define RSP 152
-#define SS 160
-
-#define ARGOFFSET R11
-
- .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
- subq $9*8+\addskip, %rsp
- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
- movq_cfi rdi, 8*8
- movq_cfi rsi, 7*8
- movq_cfi rdx, 6*8
-
- .if \save_rcx
- movq_cfi rcx, 5*8
- .endif
+/* The layout forms the "struct pt_regs" on the stack: */
+/*
+ * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
+ * unless a syscall needs a complete, fully filled "struct pt_regs".
+ */
+#define R15 0*8
+#define R14 1*8
+#define R13 2*8
+#define R12 3*8
+#define RBP 4*8
+#define RBX 5*8
+/* These regs are callee-clobbered. Always saved on kernel entry. */
+#define R11 6*8
+#define R10 7*8
+#define R9 8*8
+#define R8 9*8
+#define RAX 10*8
+#define RCX 11*8
+#define RDX 12*8
+#define RSI 13*8
+#define RDI 14*8
+/*
+ * On syscall entry, this is syscall#. On CPU exception, this is error code.
+ * On hw interrupt, it's IRQ number:
+ */
+#define ORIG_RAX 15*8
+/* Return frame for iretq */
+#define RIP 16*8
+#define CS 17*8
+#define EFLAGS 18*8
+#define RSP 19*8
+#define SS 20*8
+
+#define SIZEOF_PTREGS 21*8
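
These offsets mirror the kernel's struct pt_regs layout described by the
comments above. A build-time sanity sketch, using an illustrative stand-in
struct (pt_regs_sketch is not the kernel definition):

#include <stddef.h>

/* Illustrative stand-in for the x86-64 struct pt_regs these offsets index. */
struct pt_regs_sketch {
	unsigned long r15, r14, r13, r12, bp, bx;	/* callee-preserved */
	unsigned long r11, r10, r9, r8;			/* callee-clobbered */
	unsigned long ax, cx, dx, si, di;
	unsigned long orig_ax;		/* syscall nr / error code / IRQ nr */
	unsigned long ip, cs, flags, sp, ss;		/* hardware iret frame */
};

_Static_assert(offsetof(struct pt_regs_sketch, bx) == 5*8, "RBX");
_Static_assert(offsetof(struct pt_regs_sketch, di) == 14*8, "RDI");
_Static_assert(offsetof(struct pt_regs_sketch, orig_ax) == 15*8, "ORIG_RAX");
_Static_assert(sizeof(struct pt_regs_sketch) == 21*8, "SIZEOF_PTREGS");
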
+
+ .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
+ subq $15*8+\addskip, %rsp
+ CFI_ADJUST_CFA_OFFSET 15*8+\addskip
+ .endm
- .if \rax_enosys
- movq $-ENOSYS, 4*8(%rsp)
- .else
- movq_cfi rax, 4*8
+ .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
+ .if \r11
+ movq_cfi r11, 6*8+\offset
.endif
-
- .if \save_r891011
- movq_cfi r8, 3*8
- movq_cfi r9, 2*8
- movq_cfi r10, 1*8
- movq_cfi r11, 0*8
+ .if \r8910
+ movq_cfi r10, 7*8+\offset
+ movq_cfi r9, 8*8+\offset
+ movq_cfi r8, 9*8+\offset
+ .endif
+ .if \rax
+ movq_cfi rax, 10*8+\offset
+ .endif
+ .if \rcx
+ movq_cfi rcx, 11*8+\offset
.endif
+ movq_cfi rdx, 12*8+\offset
+ movq_cfi rsi, 13*8+\offset
+ movq_cfi rdi, 14*8+\offset
+ .endm
+ .macro SAVE_C_REGS offset=0
+ SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
+ .endm
+ .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
+ SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
+ .endm
+ .macro SAVE_C_REGS_EXCEPT_R891011
+ SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
+ .endm
+ .macro SAVE_C_REGS_EXCEPT_RCX_R891011
+ SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
+ .endm
+ .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
+ SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
+ .endm
+
+ .macro SAVE_EXTRA_REGS offset=0
+ movq_cfi r15, 0*8+\offset
+ movq_cfi r14, 1*8+\offset
+ movq_cfi r13, 2*8+\offset
+ movq_cfi r12, 3*8+\offset
+ movq_cfi rbp, 4*8+\offset
+ movq_cfi rbx, 5*8+\offset
+ .endm
+ .macro SAVE_EXTRA_REGS_RBP offset=0
+ movq_cfi rbp, 4*8+\offset
+ .endm
+ .macro RESTORE_EXTRA_REGS offset=0
+ movq_cfi_restore 0*8+\offset, r15
+ movq_cfi_restore 1*8+\offset, r14
+ movq_cfi_restore 2*8+\offset, r13
+ movq_cfi_restore 3*8+\offset, r12
+ movq_cfi_restore 4*8+\offset, rbp
+ movq_cfi_restore 5*8+\offset, rbx
.endm
-#define ARG_SKIP (9*8)
+ .macro ZERO_EXTRA_REGS
+ xorl %r15d, %r15d
+ xorl %r14d, %r14d
+ xorl %r13d, %r13d
+ xorl %r12d, %r12d
+ xorl %ebp, %ebp
+ xorl %ebx, %ebx
+ .endm
- .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
- rstor_r8910=1, rstor_rdx=1
+ .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
.if \rstor_r11
- movq_cfi_restore 0*8, r11
+ movq_cfi_restore 6*8, r11
.endif
-
.if \rstor_r8910
- movq_cfi_restore 1*8, r10
- movq_cfi_restore 2*8, r9
- movq_cfi_restore 3*8, r8
+ movq_cfi_restore 7*8, r10
+ movq_cfi_restore 8*8, r9
+ movq_cfi_restore 9*8, r8
.endif
-
.if \rstor_rax
- movq_cfi_restore 4*8, rax
+ movq_cfi_restore 10*8, rax
.endif
-
.if \rstor_rcx
- movq_cfi_restore 5*8, rcx
+ movq_cfi_restore 11*8, rcx
.endif
-
.if \rstor_rdx
- movq_cfi_restore 6*8, rdx
- .endif
-
- movq_cfi_restore 7*8, rsi
- movq_cfi_restore 8*8, rdi
-
- .if ARG_SKIP+\addskip > 0
- addq $ARG_SKIP+\addskip, %rsp
- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
+ movq_cfi_restore 12*8, rdx
.endif
+ movq_cfi_restore 13*8, rsi
+ movq_cfi_restore 14*8, rdi
.endm
-
- .macro LOAD_ARGS offset, skiprax=0
- movq \offset(%rsp), %r11
- movq \offset+8(%rsp), %r10
- movq \offset+16(%rsp), %r9
- movq \offset+24(%rsp), %r8
- movq \offset+40(%rsp), %rcx
- movq \offset+48(%rsp), %rdx
- movq \offset+56(%rsp), %rsi
- movq \offset+64(%rsp), %rdi
- .if \skiprax
- .else
- movq \offset+72(%rsp), %rax
- .endif
+ .macro RESTORE_C_REGS
+ RESTORE_C_REGS_HELPER 1,1,1,1,1
.endm
-
-#define REST_SKIP (6*8)
-
- .macro SAVE_REST
- subq $REST_SKIP, %rsp
- CFI_ADJUST_CFA_OFFSET REST_SKIP
- movq_cfi rbx, 5*8
- movq_cfi rbp, 4*8
- movq_cfi r12, 3*8
- movq_cfi r13, 2*8
- movq_cfi r14, 1*8
- movq_cfi r15, 0*8
+ .macro RESTORE_C_REGS_EXCEPT_RAX
+ RESTORE_C_REGS_HELPER 0,1,1,1,1
.endm
-
- .macro RESTORE_REST
- movq_cfi_restore 0*8, r15
- movq_cfi_restore 1*8, r14
- movq_cfi_restore 2*8, r13
- movq_cfi_restore 3*8, r12
- movq_cfi_restore 4*8, rbp
- movq_cfi_restore 5*8, rbx
- addq $REST_SKIP, %rsp
- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
+ .macro RESTORE_C_REGS_EXCEPT_RCX
+ RESTORE_C_REGS_HELPER 1,0,1,1,1
.endm
-
- .macro SAVE_ALL
- SAVE_ARGS
- SAVE_REST
+ .macro RESTORE_C_REGS_EXCEPT_R11
+ RESTORE_C_REGS_HELPER 1,1,0,1,1
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_RCX_R11
+ RESTORE_C_REGS_HELPER 1,0,0,1,1
+ .endm
+ .macro RESTORE_RSI_RDI
+ RESTORE_C_REGS_HELPER 0,0,0,0,0
+ .endm
+ .macro RESTORE_RSI_RDI_RDX
+ RESTORE_C_REGS_HELPER 0,0,0,0,1
.endm
- .macro RESTORE_ALL addskip=0
- RESTORE_REST
- RESTORE_ARGS 1, \addskip
+ .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
+ addq $15*8+\addskip, %rsp
+ CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
.endm
.macro icebp
@@ -210,37 +224,23 @@ For 32-bit we have the following conventions - kernel is built with
*/
.macro SAVE_ALL
- pushl_cfi %eax
- CFI_REL_OFFSET eax, 0
- pushl_cfi %ebp
- CFI_REL_OFFSET ebp, 0
- pushl_cfi %edi
- CFI_REL_OFFSET edi, 0
- pushl_cfi %esi
- CFI_REL_OFFSET esi, 0
- pushl_cfi %edx
- CFI_REL_OFFSET edx, 0
- pushl_cfi %ecx
- CFI_REL_OFFSET ecx, 0
- pushl_cfi %ebx
- CFI_REL_OFFSET ebx, 0
+ pushl_cfi_reg eax
+ pushl_cfi_reg ebp
+ pushl_cfi_reg edi
+ pushl_cfi_reg esi
+ pushl_cfi_reg edx
+ pushl_cfi_reg ecx
+ pushl_cfi_reg ebx
.endm
.macro RESTORE_ALL
- popl_cfi %ebx
- CFI_RESTORE ebx
- popl_cfi %ecx
- CFI_RESTORE ecx
- popl_cfi %edx
- CFI_RESTORE edx
- popl_cfi %esi
- CFI_RESTORE esi
- popl_cfi %edi
- CFI_RESTORE edi
- popl_cfi %ebp
- CFI_RESTORE ebp
- popl_cfi %eax
- CFI_RESTORE eax
+ popl_cfi_reg ebx
+ popl_cfi_reg ecx
+ popl_cfi_reg edx
+ popl_cfi_reg esi
+ popl_cfi_reg edi
+ popl_cfi_reg ebp
+ popl_cfi_reg eax
.endm
#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 59c6c40..acdee09 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -301,7 +301,7 @@ static inline void __user *arch_compat_alloc_user_space(long len)
sp = task_pt_regs(current)->sp;
} else {
/* -128 for the x32 ABI redzone */
- sp = this_cpu_read(old_rsp) - 128;
+ sp = task_pt_regs(current)->sp - 128;
}
return (void __user *)round_down(sp - len, 16);
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index d2b1298..bf2caa1 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -34,8 +34,6 @@ extern int _debug_hotplug_cpu(int cpu, int action);
#endif
#endif
-DECLARE_PER_CPU(int, cpu_state);
-
int mwait_usable(const struct cpuinfo_x86 *);
#endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 90a5485..7ee9b94 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -12,7 +12,7 @@
#include <asm/disabled-features.h>
#endif
-#define NCAPINTS 11 /* N 32-bit words worth of info */
+#define NCAPINTS 13 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
/*
@@ -195,6 +195,7 @@
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
+#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -226,12 +227,15 @@
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
@@ -242,6 +246,12 @@
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
+#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
+
/*
* BUG word(s)
*/
@@ -418,6 +428,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
" .word %P0\n" /* 1: do replace */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
+ " .byte 0\n" /* pad len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (X86_FEATURE_ALWAYS) : : t_warn);
@@ -432,6 +443,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
" .word %P0\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
+ " .byte 0\n" /* pad len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (bit) : : t_no);
@@ -457,6 +469,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
" .word %P1\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
+ " .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
@@ -483,31 +496,30 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO
-/*
- * We need to spell the jumps to the compiler because, depending on the offset,
- * the replacement jump can be bigger than the original jump, and this we cannot
- * have. Thus, we force the jump to the widest, 4-byte, signed relative
- * offset even though the last would often fit in less bytes.
- */
- asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+ asm_volatile_goto("1: jmp %l[t_dynamic]\n"
"2:\n"
+ ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+ "((5f-4f) - (2b-1b)),0x90\n"
+ "3:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
- " .long 3f - .\n" /* repl offset */
+ " .long 4f - .\n" /* repl offset */
" .word %P1\n" /* always replace */
- " .byte 2b - 1b\n" /* src len */
- " .byte 4f - 3f\n" /* repl len */
+ " .byte 3b - 1b\n" /* src len */
+ " .byte 5f - 4f\n" /* repl len */
+ " .byte 3b - 2b\n" /* pad len */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
- "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
- "4:\n"
+ "4: jmp %l[t_no]\n"
+ "5:\n"
".previous\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 0\n" /* no replacement */
" .word %P0\n" /* feature bit */
- " .byte 2b - 1b\n" /* src len */
+ " .byte 3b - 1b\n" /* src len */
" .byte 0\n" /* repl len */
+ " .byte 0\n" /* pad len */
".previous\n"
: : "i" (bit), "i" (X86_FEATURE_ALWAYS)
: : t_dynamic, t_no);
@@ -527,6 +539,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
" .word %P2\n" /* always replace */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
+ " .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
@@ -541,6 +554,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
" .word %P1\n" /* feature bit */
" .byte 4b - 3b\n" /* src len */
" .byte 6f - 5f\n" /* repl len */
+ " .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index a94b82e..a0bf89f 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -376,11 +376,16 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
* Pentium F0 0F bugfix can have resulted in the mapped
* IDT being write-protected.
*/
-#define set_intr_gate(n, addr) \
+#define set_intr_gate_notrace(n, addr) \
do { \
BUG_ON((unsigned)n > 0xFF); \
_set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
__KERNEL_CS); \
+ } while (0)
+
+#define set_intr_gate(n, addr) \
+ do { \
+ set_intr_gate_notrace(n, addr); \
_trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
0, 0, __KERNEL_CS); \
} while (0)
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index f6f1598..de1cdaf 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -86,11 +86,23 @@
CFI_ADJUST_CFA_OFFSET 8
.endm
+ .macro pushq_cfi_reg reg
+ pushq %\reg
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_REL_OFFSET \reg, 0
+ .endm
+
.macro popq_cfi reg
popq \reg
CFI_ADJUST_CFA_OFFSET -8
.endm
+ .macro popq_cfi_reg reg
+ popq %\reg
+ CFI_ADJUST_CFA_OFFSET -8
+ CFI_RESTORE \reg
+ .endm
+
.macro pushfq_cfi
pushfq
CFI_ADJUST_CFA_OFFSET 8
@@ -116,11 +128,23 @@
CFI_ADJUST_CFA_OFFSET 4
.endm
+ .macro pushl_cfi_reg reg
+ pushl %\reg
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET \reg, 0
+ .endm
+
.macro popl_cfi reg
popl \reg
CFI_ADJUST_CFA_OFFSET -4
.endm
+ .macro popl_cfi_reg reg
+ popl %\reg
+ CFI_ADJUST_CFA_OFFSET -4
+ CFI_RESTORE \reg
+ .endm
+
.macro pushfl_cfi
pushfl
CFI_ADJUST_CFA_OFFSET 4
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 779c2ef..3ab0537 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -40,14 +40,6 @@ static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
}
#endif
-#ifdef CONFIG_MEMTEST
-extern void early_memtest(unsigned long start, unsigned long end);
-#else
-static inline void early_memtest(unsigned long start, unsigned long end)
-{
-}
-#endif
-
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
extern u64 early_reserve_e820(u64 sizet, u64 align);
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 25bce45..3738b13 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -2,6 +2,8 @@
#define _ASM_X86_EFI_H
#include <asm/i387.h>
+#include <asm/pgtable.h>
+
/*
* We map the EFI regions needed for runtime services non-contiguously,
* with preserved alignment on virtual addresses starting from -4G down
@@ -89,8 +91,8 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
-extern void __init efi_call_phys_prolog(void);
-extern void __init efi_call_phys_epilog(void);
+extern pgd_t * __init efi_call_phys_prolog(void);
+extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
extern void __init efi_unmap_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index ca3347a..f161c18 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -171,10 +171,11 @@ do { \
static inline void elf_common_init(struct thread_struct *t,
struct pt_regs *regs, const u16 ds)
{
- regs->ax = regs->bx = regs->cx = regs->dx = 0;
- regs->si = regs->di = regs->bp = 0;
+ /* Commented-out registers are cleared in stub_execve */
+ /*regs->ax = regs->bx =*/ regs->cx = regs->dx = 0;
+ regs->si = regs->di /*= regs->bp*/ = 0;
regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
- regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
+ /*regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;*/
t->fs = t->gs = 0;
t->fsindex = t->gsindex = 0;
t->ds = t->es = ds;
@@ -338,9 +339,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
/*
* True on X86_32 or when emulating IA32 on X86_64
*/
@@ -365,6 +363,7 @@ enum align_flags {
struct va_alignment {
int flags;
unsigned long mask;
+ unsigned long bits;
} ____cacheline_aligned;
extern struct va_alignment va_align;
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 0dbc082..da5e967 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -67,6 +67,34 @@ extern void finit_soft_fpu(struct i387_soft_struct *soft);
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif
+/*
+ * Must be run with preemption disabled: this clears the fpu_owner_task,
+ * on this CPU.
+ *
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, it will still be saved.
+ */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+ per_cpu(fpu_owner_task, cpu) = NULL;
+}
+
+/*
+ * Used to indicate that the FPU state in memory is newer than the FPU
+ * state in registers, and the FPU state should be reloaded next time the
+ * task is run. Only safe on the current task, or non-running tasks.
+ */
+static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
+{
+ tsk->thread.fpu.last_cpu = ~0;
+}
+
+static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
+{
+ return new == this_cpu_read_stable(fpu_owner_task) &&
+ cpu == new->thread.fpu.last_cpu;
+}
+
static inline int is_ia32_compat_frame(void)
{
return config_enabled(CONFIG_IA32_EMULATION) &&
@@ -107,7 +135,6 @@ static __always_inline __pure bool use_fxsr(void)
static inline void fx_finit(struct i387_fxsave_struct *fx)
{
- memset(fx, 0, xstate_size);
fx->cwd = 0x37f;
fx->mxcsr = MXCSR_DEFAULT;
}
@@ -351,8 +378,14 @@ static inline void __thread_fpu_begin(struct task_struct *tsk)
__thread_set_has_fpu(tsk);
}
-static inline void __drop_fpu(struct task_struct *tsk)
+static inline void drop_fpu(struct task_struct *tsk)
{
+ /*
+ * Forget coprocessor state..
+ */
+ preempt_disable();
+ tsk->thread.fpu_counter = 0;
+
if (__thread_has_fpu(tsk)) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
@@ -360,30 +393,29 @@ static inline void __drop_fpu(struct task_struct *tsk)
_ASM_EXTABLE(1b, 2b));
__thread_fpu_end(tsk);
}
+
+ clear_stopped_child_used_math(tsk);
+ preempt_enable();
}
-static inline void drop_fpu(struct task_struct *tsk)
+static inline void restore_init_xstate(void)
{
- /*
- * Forget coprocessor state..
- */
- preempt_disable();
- tsk->thread.fpu_counter = 0;
- __drop_fpu(tsk);
- clear_used_math();
- preempt_enable();
+ if (use_xsave())
+ xrstor_state(init_xstate_buf, -1);
+ else
+ fxrstor_checking(&init_xstate_buf->i387);
}
-static inline void drop_init_fpu(struct task_struct *tsk)
+/*
+ * Reset the FPU state in the eager case and drop it in the lazy case (later use
+ * will reinit it).
+ */
+static inline void fpu_reset_state(struct task_struct *tsk)
{
if (!use_eager_fpu())
drop_fpu(tsk);
- else {
- if (use_xsave())
- xrstor_state(init_xstate_buf, -1);
- else
- fxrstor_checking(&init_xstate_buf->i387);
- }
+ else
+ restore_init_xstate();
}
/*
@@ -400,24 +432,6 @@ static inline void drop_init_fpu(struct task_struct *tsk)
*/
typedef struct { int preload; } fpu_switch_t;
-/*
- * Must be run with preemption disabled: this clears the fpu_owner_task,
- * on this CPU.
- *
- * This will disable any lazy FPU state restore of the current FPU state,
- * but if the current thread owns the FPU, it will still be saved by.
- */
-static inline void __cpu_disable_lazy_restore(unsigned int cpu)
-{
- per_cpu(fpu_owner_task, cpu) = NULL;
-}
-
-static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
-{
- return new == this_cpu_read_stable(fpu_owner_task) &&
- cpu == new->thread.fpu.last_cpu;
-}
-
static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
fpu_switch_t fpu;
@@ -426,13 +440,17 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
* If the task has used the math, pre-load the FPU on xsave processors
* or if the past 5 consecutive context-switches used math.
*/
- fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
- new->thread.fpu_counter > 5);
+ fpu.preload = tsk_used_math(new) &&
+ (use_eager_fpu() || new->thread.fpu_counter > 5);
+
if (__thread_has_fpu(old)) {
if (!__save_init_fpu(old))
- cpu = ~0;
- old->thread.fpu.last_cpu = cpu;
- old->thread.fpu.has_fpu = 0; /* But leave fpu_owner_task! */
+ task_disable_lazy_fpu_restore(old);
+ else
+ old->thread.fpu.last_cpu = cpu;
+
+ /* But leave fpu_owner_task! */
+ old->thread.fpu.has_fpu = 0;
/* Don't change CR0.TS if we just switch! */
if (fpu.preload) {
@@ -443,10 +461,10 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
stts();
} else {
old->thread.fpu_counter = 0;
- old->thread.fpu.last_cpu = ~0;
+ task_disable_lazy_fpu_restore(old);
if (fpu.preload) {
new->thread.fpu_counter++;
- if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
+ if (fpu_lazy_restore(new, cpu))
fpu.preload = 0;
else
prefetch(new->thread.fpu.state);
@@ -466,7 +484,7 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
if (fpu.preload) {
if (unlikely(restore_fpu_checking(new)))
- drop_init_fpu(new);
+ fpu_reset_state(new);
}
}
@@ -495,10 +513,12 @@ static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
}
/*
- * Need to be preemption-safe.
+ * Needs to be preemption-safe.
*
* NOTE! user_fpu_begin() must be used only immediately before restoring
- * it. This function does not do any save/restore on their own.
+ * the save state. It does not do any saving/restoring on its own. In
+ * lazy FPU mode, it is just an optimization to avoid a #NM exception,
+ * the task can lose the FPU right after preempt_enable().
*/
static inline void user_fpu_begin(void)
{
@@ -520,24 +540,6 @@ static inline void __save_fpu(struct task_struct *tsk)
}
/*
- * These disable preemption on their own and are safe
- */
-static inline void save_init_fpu(struct task_struct *tsk)
-{
- WARN_ON_ONCE(!__thread_has_fpu(tsk));
-
- if (use_eager_fpu()) {
- __save_fpu(tsk);
- return;
- }
-
- preempt_disable();
- __save_init_fpu(tsk);
- __thread_fpu_end(tsk);
- preempt_enable();
-}
-
-/*
* i387 state interaction
*/
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 9662290..e9571dd 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -181,10 +181,9 @@ extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
extern __visible void smp_invalidate_interrupt(struct pt_regs *);
#endif
-extern void (*__initconst interrupt[FIRST_SYSTEM_VECTOR
- - FIRST_EXTERNAL_VECTOR])(void);
+extern char irq_entries_start[];
#ifdef CONFIG_TRACING
-#define trace_interrupt interrupt
+#define trace_irq_entries_start irq_entries_start
#endif
#define VECTOR_UNDEFINED (-1)
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 47f29b1..e7814b7 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -69,7 +69,7 @@ struct insn {
const insn_byte_t *next_byte;
};
-#define MAX_INSN_SIZE 16
+#define MAX_INSN_SIZE 15
#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h
index f42a047..e37d6b3 100644
--- a/arch/x86/include/asm/iommu_table.h
+++ b/arch/x86/include/asm/iommu_table.h
@@ -79,11 +79,12 @@ struct iommu_table_entry {
* d). Similar to the 'init', except that this gets called from pci_iommu_init
* where we do have a memory allocator.
*
- * The standard vs the _FINISH differs in that the _FINISH variant will
- * continue detecting other IOMMUs in the call list after the
- * the detection routine returns a positive number. The _FINISH will
- * stop the execution chain. Both will still call the 'init' and
- * 'late_init' functions if they are set.
+ * The standard IOMMU_INIT differs from the IOMMU_INIT_FINISH variant
+ * in that the former will continue detecting other IOMMUs in the call
+ * list after the detection routine returns a positive number, while the
+ * latter will stop the execution chain upon first successful detection.
+ * Both variants will still call the 'init' and 'late_init' functions if
+ * they are set.
*/
#define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init) \
__IOMMU_INIT(_detect, _depend, _init, _late_init, 1)
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 0a8b519..b77f5ed 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -136,10 +136,6 @@ static inline notrace unsigned long arch_local_irq_save(void)
#define USERGS_SYSRET32 \
swapgs; \
sysretl
-#define ENABLE_INTERRUPTS_SYSEXIT32 \
- swapgs; \
- sti; \
- sysexit
#else
#define INTERRUPT_RETURN iret
@@ -163,22 +159,27 @@ static inline int arch_irqs_disabled(void)
return arch_irqs_disabled_flags(flags);
}
+#endif /* !__ASSEMBLY__ */
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_TRACE_IRQFLAGS
+# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
+# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
#else
-
-#ifdef CONFIG_X86_64
-#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
-#define ARCH_LOCKDEP_SYS_EXIT_IRQ \
+# define TRACE_IRQS_ON
+# define TRACE_IRQS_OFF
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_X86_64
+# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
+# define LOCKDEP_SYS_EXIT_IRQ \
TRACE_IRQS_ON; \
sti; \
- SAVE_REST; \
- LOCKDEP_SYS_EXIT; \
- RESTORE_REST; \
+ call lockdep_sys_exit_thunk; \
cli; \
TRACE_IRQS_OFF;
-
-#else
-#define ARCH_LOCKDEP_SYS_EXIT \
+# else
+# define LOCKDEP_SYS_EXIT \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
@@ -186,24 +187,12 @@ static inline int arch_irqs_disabled(void)
popl %edx; \
popl %ecx; \
popl %eax;
-
-#define ARCH_LOCKDEP_SYS_EXIT_IRQ
-#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
-# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
+# define LOCKDEP_SYS_EXIT_IRQ
+# endif
#else
-# define TRACE_IRQS_ON
-# define TRACE_IRQS_OFF
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT
-# define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ
-# else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
-# endif
-
+#endif
#endif /* __ASSEMBLY__ */
+
#endif
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 6a2cefb..a4c1cf7 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -1,7 +1,7 @@
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H
-#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
#include <linux/stringify.h>
#include <linux/types.h>
@@ -30,8 +30,6 @@ l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
#ifdef CONFIG_X86_64
typedef u64 jump_label_t;
#else
@@ -44,4 +42,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a236e39..dea2e7e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -81,11 +81,6 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
-#define SELECTOR_TI_MASK (1 << 2)
-#define SELECTOR_RPL_MASK 0x03
-
-#define IOPL_SHIFT 12
-
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
@@ -345,6 +340,7 @@ struct kvm_pmu {
enum {
KVM_DEBUGREG_BP_ENABLED = 1,
KVM_DEBUGREG_WONT_EXIT = 2,
+ KVM_DEBUGREG_RELOAD = 4,
};
struct kvm_vcpu_arch {
@@ -431,6 +427,9 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+
+ int maxphyaddr;
+
/* emulate context */
struct x86_emulate_ctxt emulate_ctxt;
@@ -550,11 +549,20 @@ struct kvm_arch_memory_slot {
struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};
+/*
+ * As the mode, we use the number of bits allocated in the LDR for the
+ * logical processor ID. It happens that these are all powers of two.
+ * This makes it very easy to detect cases where the APICs are
+ * configured for multiple modes; in that case, we cannot use the map and
+ * hence cannot use kvm_irq_delivery_to_apic_fast either.
+ */
+#define KVM_APIC_MODE_XAPIC_CLUSTER 4
+#define KVM_APIC_MODE_XAPIC_FLAT 8
+#define KVM_APIC_MODE_X2APIC 16
+
struct kvm_apic_map {
struct rcu_head rcu;
- u8 ldr_bits;
- /* fields bellow are used to decode ldr values in different modes */
- u32 cid_shift, cid_mask, lid_mask, broadcast;
+ u8 mode;
struct kvm_lapic *phys_map[256];
/* first index is cluster id second is cpu id in a cluster */
struct kvm_lapic *logical_map[16][16];
@@ -859,6 +867,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot);
+void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
@@ -933,6 +943,7 @@ struct x86_emulate_ctxt;
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
@@ -1128,7 +1139,6 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index e62cf89..c1adf33 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -115,7 +115,7 @@ static inline void kvm_spinlock_init(void)
static inline bool kvm_para_available(void)
{
- return 0;
+ return false;
}
static inline unsigned int kvm_arch_para_features(void)
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index a455a53..2d29197 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -32,8 +32,8 @@ static inline int klp_check_compiler_support(void)
#endif
return 0;
}
-extern int klp_write_module_reloc(struct module *mod, unsigned long type,
- unsigned long loc, unsigned long value);
+int klp_write_module_reloc(struct module *mod, unsigned long type,
+ unsigned long loc, unsigned long value);
static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 9b3de99..1f5a86d 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -116,6 +116,12 @@ struct mca_config {
u32 rip_msr;
};
+struct mce_vendor_flags {
+ __u64 overflow_recov : 1, /* cpuid_ebx(80000007) */
+ __reserved_0 : 63;
+};
+extern struct mce_vendor_flags mce_flags;
+
extern struct mca_config mca_cfg;
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);
@@ -128,9 +134,11 @@ extern int mce_p5_enabled;
#ifdef CONFIG_X86_MCE
int mcheck_init(void);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
+void mcheck_vendor_init_severity(void);
#else
static inline int mcheck_init(void) { return 0; }
static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
+static inline void mcheck_vendor_init_severity(void) {}
#endif
#ifdef CONFIG_X86_ANCIENT_MCE
@@ -183,11 +191,11 @@ typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
enum mcp_flags {
- MCP_TIMESTAMP = (1 << 0), /* log time stamp */
- MCP_UC = (1 << 1), /* log uncorrected errors */
- MCP_DONTLOG = (1 << 2), /* only clear, don't log */
+ MCP_TIMESTAMP = BIT(0), /* log time stamp */
+ MCP_UC = BIT(1), /* log uncorrected errors */
+ MCP_DONTLOG = BIT(2), /* only clear, don't log */
};
-void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
+bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
int mce_notify_irq(void);
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 201b520..2fb20d6 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -75,6 +75,79 @@ static inline void __exit exit_amd_microcode(void) {}
#ifdef CONFIG_MICROCODE_EARLY
#define MAX_UCODE_COUNT 128
+
+#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
+#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
+#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
+#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
+#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
+#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
+#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')
+
+#define CPUID_IS(a, b, c, ebx, ecx, edx) \
+ (!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))
+
+/*
+ * In the early microcode loading phase on the BSP, boot_cpu_data is not set
+ * up yet. x86_vendor() gets the vendor id for the BSP.
+ *
+ * In the 32-bit AP case, accessing boot_cpu_data needs a linear address. To
+ * simplify the code, we still use x86_vendor() to get the vendor id for APs.
+ *
+ * x86_vendor() reads the vendor information directly from CPUID.
+ */
+static inline int x86_vendor(void)
+{
+ u32 eax = 0x00000000;
+ u32 ebx, ecx = 0, edx;
+
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
+ return X86_VENDOR_INTEL;
+
+ if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
+ return X86_VENDOR_AMD;
+
+ return X86_VENDOR_UNKNOWN;
+}
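
The QCHAR() constants compare against the raw CPUID(0) vendor-string words
("GenuineIntel" arrives as ebx="Genu", edx="ineI", ecx="ntel"). A small
illustrative check of the packing, valid on a little-endian host:

#include <assert.h>
#include <string.h>

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))

int main(void)
{
	unsigned int ebx;

	/* On x86 the four ASCII bytes pack least-significant-byte first. */
	memcpy(&ebx, "Genu", 4);
	assert(ebx == (unsigned int)QCHAR('G', 'e', 'n', 'u'));
	return 0;
}
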
+
+static inline unsigned int __x86_family(unsigned int sig)
+{
+ unsigned int x86;
+
+ x86 = (sig >> 8) & 0xf;
+
+ if (x86 == 0xf)
+ x86 += (sig >> 20) & 0xff;
+
+ return x86;
+}
+
+static inline unsigned int x86_family(void)
+{
+ u32 eax = 0x00000001;
+ u32 ebx, ecx = 0, edx;
+
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ return __x86_family(eax);
+}
+
+static inline unsigned int x86_model(unsigned int sig)
+{
+ unsigned int x86, model;
+
+ x86 = __x86_family(sig);
+
+ model = (sig >> 4) & 0xf;
+
+ if (x86 == 0x6 || x86 == 0xf)
+ model += ((sig >> 16) & 0xf) << 4;
+
+ return model;
+}
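
__x86_family() and x86_model() implement the standard CPUID signature decode:
the extended family bits only contribute for family 0xf, and the extended
model bits for families 6 and 0xf. A standalone sketch decoding a sample
signature (0x306a9 is just an example value):

#include <stdio.h>

/* Same decode as __x86_family()/x86_model() above, standalone. */
static unsigned int family(unsigned int sig)
{
	unsigned int x86 = (sig >> 8) & 0xf;

	if (x86 == 0xf)
		x86 += (sig >> 20) & 0xff;
	return x86;
}

static unsigned int model(unsigned int sig)
{
	unsigned int x86 = family(sig), m = (sig >> 4) & 0xf;

	if (x86 == 0x6 || x86 == 0xf)
		m += ((sig >> 16) & 0xf) << 4;
	return m;
}

int main(void)
{
	unsigned int sig = 0x000306a9;	/* sample CPUID(1).EAX value */

	printf("family 0x%x, model 0x%x\n", family(sig), model(sig));
	/* prints: family 0x6, model 0x3a */
	return 0;
}
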
+
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
extern int __init save_microcode_in_initrd(void);
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index dd4c200..2b9209c 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -56,12 +56,15 @@ struct extended_sigtable {
#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
-extern int
-get_matching_microcode(unsigned int csig, int cpf, void *mc, int rev);
+extern int get_matching_microcode(unsigned int csig, int cpf, int rev, void *mc);
extern int microcode_sanity_check(void *mc, int print_err);
-extern int get_matching_sig(unsigned int csig, int cpf, void *mc, int rev);
-extern int
-update_match_revision(struct microcode_header_intel *mc_header, int rev);
+extern int get_matching_sig(unsigned int csig, int cpf, int rev, void *mc);
+
+static inline int
+revision_is_newer(struct microcode_header_intel *mc_header, int rev)
+{
+ return (mc_header->rev <= rev) ? 0 : 1;
+}
#ifdef CONFIG_MICROCODE_INTEL_EARLY
extern void __init load_ucode_intel_bsp(void);
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index a1410db..653dfa7 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
:: "a" (eax), "c" (ecx));
}
+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+{
+ trace_hardirqs_on();
+ /* "mwait %eax, %ecx;" */
+ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
+}
+
/*
* This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
* which can obviate IPI to trigger checking of need_resched.
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 95e11f7..c7c712f 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -40,8 +40,10 @@
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
+#define IOREMAP_MAX_ORDER (PUD_SHIFT)
#else
#include <asm/page_32_types.h>
+#define IOREMAP_MAX_ORDER (PMD_SHIFT)
#endif /* CONFIG_X86_64 */
#ifndef __ASSEMBLY__
@@ -51,8 +53,6 @@ extern int devmem_is_allowed(unsigned long pagenr);
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;
-extern bool kaslr_enabled;
-
static inline phys_addr_t get_max_mapped(void)
{
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 965c47d..8957810 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -545,7 +545,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
pmdval_t ret;
@@ -585,7 +585,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
val);
}
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
pudval_t ret;
@@ -636,9 +636,9 @@ static inline void pud_clear(pud_t *pudp)
set_pud(pudp, __pud(0));
}
-#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
-#endif /* PAGETABLE_LEVELS >= 3 */
+#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
@@ -976,11 +976,6 @@ extern void default_banner(void);
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
-
-#define ENABLE_INTERRUPTS_SYSEXIT32 \
- PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
- CLBR_NONE, \
- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif /* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 7549b8b..f7b0b5c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -294,7 +294,7 @@ struct pv_mmu_ops {
struct paravirt_callee_save pgd_val;
struct paravirt_callee_save make_pgd;
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
@@ -308,13 +308,13 @@ struct pv_mmu_ops {
struct paravirt_callee_save pmd_val;
struct paravirt_callee_save make_pmd;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
struct paravirt_callee_save pud_val;
struct paravirt_callee_save make_pud;
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
-#endif /* PAGETABLE_LEVELS == 4 */
-#endif /* PAGETABLE_LEVELS >= 3 */
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
struct pv_lazy_ops lazy_mode;
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index fa1195d..164e3f8 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+extern bool mp_should_keep_irq(struct device *dev);
+
struct pci_raw_ops {
int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val);
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index c4412e9..bf7f8b5 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -77,7 +77,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
#define pmd_pgtable(pmd) pmd_page(pmd)
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *page;
@@ -116,7 +116,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
#endif /* CONFIG_X86_PAE */
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
@@ -142,7 +142,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
___pud_free_tlb(tlb, pud);
}
-#endif /* PAGETABLE_LEVELS > 3 */
-#endif /* PAGETABLE_LEVELS > 2 */
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#endif /* _ASM_X86_PGALLOC_H */
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index daacc23..3925764 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -17,7 +17,6 @@ typedef union {
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0
-#define PAGETABLE_LEVELS 2
/*
* traditional i386 two-level paging structure:
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index 1bd5876..bcc8962 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -24,8 +24,6 @@ typedef union {
#define SHARED_KERNEL_PMD 1
#endif
-#define PAGETABLE_LEVELS 3
-
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
*/
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index a0c35bf..fe57e7a 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -551,7 +551,7 @@ static inline unsigned long pages_to_mb(unsigned long npg)
return npg >> (20 - PAGE_SHIFT);
}
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
return native_pud_val(pud) == 0;
@@ -594,9 +594,9 @@ static inline int pud_large(pud_t pud)
{
return 0;
}
-#endif /* PAGETABLE_LEVELS > 2 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
return pgd_flags(pgd) & _PAGE_PRESENT;
@@ -633,7 +633,7 @@ static inline int pgd_none(pgd_t pgd)
{
return !native_pgd_val(pgd);
}
-#endif /* PAGETABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 602b602..e6844df 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -20,7 +20,6 @@ typedef struct { pteval_t pte; } pte_t;
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0
-#define PAGETABLE_LEVELS 4
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 8c7c108..78f0c8c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -234,7 +234,7 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;
static inline pud_t native_make_pud(pmdval_t val)
@@ -255,7 +255,7 @@ static inline pudval_t native_pud_val(pud_t pud)
}
#endif
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;
static inline pmd_t native_make_pmd(pmdval_t val)
diff --git a/arch/x86/include/asm/resume-trace.h b/arch/x86/include/asm/pm-trace.h
index 3ff1c2c..7b7ac42 100644
--- a/arch/x86/include/asm/resume-trace.h
+++ b/arch/x86/include/asm/pm-trace.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_RESUME_TRACE_H
-#define _ASM_X86_RESUME_TRACE_H
+#ifndef _ASM_X86_PM_TRACE_H
+#define _ASM_X86_PM_TRACE_H
#include <asm/asm.h>
@@ -14,8 +14,10 @@ do { \
".previous" \
:"=r" (tracedata) \
: "i" (__LINE__), "i" (__FILE__)); \
- generate_resume_trace(tracedata, user); \
+ generate_pm_trace(tracedata, user); \
} \
} while (0)
-#endif /* _ASM_X86_RESUME_TRACE_H */
+#define TRACE_SUSPEND(user) TRACE_RESUME(user)
+
+#endif /* _ASM_X86_PM_TRACE_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ec1c935..23ba676 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -109,6 +109,9 @@ struct cpuinfo_x86 {
/* in KB - valid for CPUS which support this call: */
int x86_cache_size;
int x86_cache_alignment; /* In bytes */
+ /* Cache QoS architectural values: */
+ int x86_cache_max_rmid; /* max index */
+ int x86_cache_occ_scale; /* scale to bytes */
int x86_power;
unsigned long loops_per_jiffy;
/* cpuid returned max cores value: */
@@ -210,8 +213,23 @@ struct x86_hw_tss {
unsigned long sp0;
unsigned short ss0, __ss0h;
unsigned long sp1;
- /* ss1 caches MSR_IA32_SYSENTER_CS: */
- unsigned short ss1, __ss1h;
+
+ /*
+ * We don't use ring 1, so ss1 is a convenient scratch space in
+ * the same cacheline as sp0. We use ss1 to cache the value in
+ * MSR_IA32_SYSENTER_CS. When we context switch
+ * MSR_IA32_SYSENTER_CS, we first check if the new value being
+ * written matches ss1, and, if it doesn't, we wrmsr the new
+ * value and update ss1.
+ *
+ * The only reason we context switch MSR_IA32_SYSENTER_CS is
+ * that we set it to zero in vm86 tasks to avoid corrupting the
+ * stack if we were to go through the sysenter path from vm86
+ * mode.
+ */
+ unsigned short ss1; /* MSR_IA32_SYSENTER_CS */
+
+ unsigned short __ss1h;
unsigned long sp2;
unsigned short ss2, __ss2h;
unsigned long __cr3;
@@ -276,13 +294,17 @@ struct tss_struct {
unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
/*
- * .. and then another 0x100 bytes for the emergency kernel stack:
+ * Space for the temporary SYSENTER stack:
*/
- unsigned long stack[64];
+ unsigned long SYSENTER_stack[64];
} ____cacheline_aligned;
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
+
+#ifdef CONFIG_X86_32
+DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+#endif
/*
* Save the original ist values for checking stack pointers during debugging
@@ -474,7 +496,6 @@ struct thread_struct {
#ifdef CONFIG_X86_32
unsigned long sysenter_cs;
#else
- unsigned long usersp; /* Copy from PDA */
unsigned short es;
unsigned short ds;
unsigned short fsindex;
@@ -564,6 +585,16 @@ static inline void native_swapgs(void)
#endif
}
+static inline unsigned long current_top_of_stack(void)
+{
+#ifdef CONFIG_X86_64
+ return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
+#else
+ /* sp0 on x86_32 is special in and around vm86 mode. */
+ return this_cpu_read_stable(cpu_current_top_of_stack);
+#endif
+}
+
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
@@ -761,10 +792,10 @@ extern char ignore_fpu_irq;
#define ARCH_HAS_SPINLOCK_PREFETCH
#ifdef CONFIG_X86_32
-# define BASE_PREFETCH ASM_NOP4
+# define BASE_PREFETCH ""
# define ARCH_HAS_PREFETCH
#else
-# define BASE_PREFETCH "prefetcht0 (%1)"
+# define BASE_PREFETCH "prefetcht0 %P1"
#endif
/*
@@ -775,10 +806,9 @@ extern char ignore_fpu_irq;
*/
static inline void prefetch(const void *x)
{
- alternative_input(BASE_PREFETCH,
- "prefetchnta (%1)",
+ alternative_input(BASE_PREFETCH, "prefetchnta %P1",
X86_FEATURE_XMM,
- "r" (x));
+ "m" (*(const char *)x));
}
/*
@@ -788,10 +818,9 @@ static inline void prefetch(const void *x)
*/
static inline void prefetchw(const void *x)
{
- alternative_input(BASE_PREFETCH,
- "prefetchw (%1)",
- X86_FEATURE_3DNOW,
- "r" (x));
+ alternative_input(BASE_PREFETCH, "prefetchw %P1",
+ X86_FEATURE_3DNOWPREFETCH,
+ "m" (*(const char *)x));
}
static inline void spin_lock_prefetch(const void *x)
@@ -799,6 +828,9 @@ static inline void spin_lock_prefetch(const void *x)
prefetchw(x);
}
+#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
+ TOP_OF_KERNEL_STACK_PADDING)
+
#ifdef CONFIG_X86_32
/*
* User space process size: 3GB (default).
@@ -809,39 +841,16 @@ static inline void spin_lock_prefetch(const void *x)
#define STACK_TOP_MAX STACK_TOP
#define INIT_THREAD { \
- .sp0 = sizeof(init_stack) + (long)&init_stack, \
+ .sp0 = TOP_OF_INIT_STACK, \
.vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
}
-/*
- * Note that the .io_bitmap member must be extra-big. This is because
- * the CPU will access an additional byte beyond the end of the IO
- * permission bitmap. The extra byte must be all 1 bits, and must
- * be within the limit.
- */
-#define INIT_TSS { \
- .x86_tss = { \
- .sp0 = sizeof(init_stack) + (long)&init_stack, \
- .ss0 = __KERNEL_DS, \
- .ss1 = __KERNEL_CS, \
- .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
- }, \
- .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
-}
-
extern unsigned long thread_saved_pc(struct task_struct *tsk);
-#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
-#define KSTK_TOP(info) \
-({ \
- unsigned long *__ptr = (unsigned long *)(info); \
- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
-})
-
/*
- * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+ * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
* This is necessary to guarantee that the entire "struct pt_regs"
* is accessible even if the CPU hasn't stored the SS/ESP registers
* on the stack (interrupt gate does not save these registers
@@ -850,11 +859,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
* "struct pt_regs" is possible, but they may contain the
* completely wrong values.
*/
-#define task_pt_regs(task) \
-({ \
- struct pt_regs *__regs__; \
- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
- __regs__ - 1; \
+#define task_pt_regs(task) \
+({ \
+ unsigned long __ptr = (unsigned long)task_stack_page(task); \
+ __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
+ ((struct pt_regs *)__ptr) - 1; \
})
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
@@ -886,11 +895,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define STACK_TOP_MAX TASK_SIZE_MAX
#define INIT_THREAD { \
- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-}
-
-#define INIT_TSS { \
- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+ .sp0 = TOP_OF_INIT_STACK \
}
/*
@@ -902,11 +907,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
-/*
- * User space RSP while inside the SYSCALL fast path
- */
-DECLARE_PER_CPU(unsigned long, old_rsp);
-
#endif /* CONFIG_X86_64 */
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
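The new task_pt_regs() drops the old KSTK_TOP()/THREAD_SIZE_LONGS indirection in favor of plain offset arithmetic from the stack page. A worked sketch of the layout it relies on, assuming THREAD_SIZE = 8192 and the 32-bit padding of 8 bytes (the address is illustrative only):

/*
 *   task_stack_page(task)                       e.g. 0xc1234000
 *   ...
 *   pt_regs       top - sizeof(struct pt_regs)
 *   padding       0xc1234000 + 8192 - 8
 *   stack top     0xc1234000 + 8192             (sp0)
 */
static struct pt_regs *task_pt_regs_sketch(struct task_struct *task)
{
	unsigned long top = (unsigned long)task_stack_page(task);

	top += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;

	/* The register frame sits immediately below the padding. */
	return (struct pt_regs *)top - 1;
}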
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 86fc2bb..19507ff 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -31,13 +31,17 @@ struct pt_regs {
#else /* __i386__ */
struct pt_regs {
+/*
+ * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
+ * unless a syscall needs a complete, fully filled "struct pt_regs".
+ */
unsigned long r15;
unsigned long r14;
unsigned long r13;
unsigned long r12;
unsigned long bp;
unsigned long bx;
-/* arguments: non interrupts/non tracing syscalls only save up to here*/
+/* These regs are callee-clobbered. Always saved on kernel entry. */
unsigned long r11;
unsigned long r10;
unsigned long r9;
@@ -47,9 +51,12 @@ struct pt_regs {
unsigned long dx;
unsigned long si;
unsigned long di;
+/*
+ * On syscall entry, this is syscall#. On CPU exception, this is error code.
+ * On hw interrupt, it's IRQ number:
+ */
unsigned long orig_ax;
-/* end of arguments */
-/* cpu exception frame or undefined */
+/* Return frame for iretq */
unsigned long ip;
unsigned long cs;
unsigned long flags;
@@ -89,11 +96,13 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
}
/*
- * user_mode_vm(regs) determines whether a register set came from user mode.
- * This is true if V8086 mode was enabled OR if the register set was from
- * protected mode with RPL-3 CS value. This tricky test checks that with
- * one comparison. Many places in the kernel can bypass this full check
- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
+ * user_mode(regs) determines whether a register set came from user
+ * mode. On x86_32, this is true if V8086 mode was enabled OR if the
+ * register set was from protected mode with RPL-3 CS value. This
+ * tricky test checks that with one comparison.
+ *
+ * On x86_64, vm86 mode is mercifully nonexistent, and we don't need
+ * the extra check.
*/
static inline int user_mode(struct pt_regs *regs)
{
@@ -104,16 +113,6 @@ static inline int user_mode(struct pt_regs *regs)
#endif
}
-static inline int user_mode_vm(struct pt_regs *regs)
-{
-#ifdef CONFIG_X86_32
- return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
- USER_RPL;
-#else
- return user_mode(regs);
-#endif
-}
-
static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
@@ -138,12 +137,8 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
#endif
}
-#define current_user_stack_pointer() this_cpu_read(old_rsp)
-/* ia32 vs. x32 difference */
-#define compat_user_stack_pointer() \
- (test_thread_flag(TIF_IA32) \
- ? current_pt_regs()->sp \
- : this_cpu_read(old_rsp))
+#define current_user_stack_pointer() current_pt_regs()->sp
+#define compat_user_stack_pointer() current_pt_regs()->sp
#endif
#ifdef CONFIG_X86_32
@@ -248,7 +243,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
*/
#define arch_ptrace_stop_needed(code, info) \
({ \
- set_thread_flag(TIF_NOTIFY_RESUME); \
+ force_iret(); \
false; \
})
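With user_mode_vm() gone, the single comparison in user_mode() has to cover vm86 as well. A sketch of why the OR trick works (the scenarios in the comment are illustrative; the constants are the ones used in this patch):

static inline int user_mode_sketch(struct pt_regs *regs)
{
	/*
	 *  kernel: (cs & 3) == 0, VM clear  ->  0         <  USER_RPL
	 *  user:   (cs & 3) == 3, VM clear  ->  3         >= USER_RPL
	 *  vm86:   (cs & 3) any,  VM set    ->  >= VM bit >= USER_RPL
	 */
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK))
		>= USER_RPL;
}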
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index d6b078e..25b1cc0 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
struct pvclock_vsyscall_time_info {
struct pvclock_vcpu_time_info pvti;
+ u32 migrate_count;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index db257a5..5a9856e 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -3,8 +3,10 @@
#include <linux/const.h>
-/* Constructor for a conventional segment GDT (or LDT) entry */
-/* This is a macro so it can be used in initializers */
+/*
+ * Constructor for a conventional segment GDT (or LDT) entry.
+ * This is a macro so it can be used in initializers.
+ */
#define GDT_ENTRY(flags, base, limit) \
((((base) & _AC(0xff000000,ULL)) << (56-24)) | \
(((flags) & _AC(0x0000f0ff,ULL)) << 40) | \
@@ -12,198 +14,228 @@
(((base) & _AC(0x00ffffff,ULL)) << 16) | \
(((limit) & _AC(0x0000ffff,ULL))))
-/* Simple and small GDT entries for booting only */
+/* Simple and small GDT entries for booting only: */
#define GDT_ENTRY_BOOT_CS 2
-#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
+#define GDT_ENTRY_BOOT_DS 3
+#define GDT_ENTRY_BOOT_TSS 4
+#define __BOOT_CS (GDT_ENTRY_BOOT_CS*8)
+#define __BOOT_DS (GDT_ENTRY_BOOT_DS*8)
+#define __BOOT_TSS (GDT_ENTRY_BOOT_TSS*8)
+
+/*
+ * Bottom two bits of selector give the ring
+ * privilege level
+ */
+#define SEGMENT_RPL_MASK 0x3
-#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
-#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
+/* User mode is privilege level 3: */
+#define USER_RPL 0x3
-#define GDT_ENTRY_BOOT_TSS (GDT_ENTRY_BOOT_CS + 2)
-#define __BOOT_TSS (GDT_ENTRY_BOOT_TSS * 8)
+/* Bit 2 is Table Indicator (TI): selects between LDT and GDT */
+#define SEGMENT_TI_MASK 0x4
+/* LDT segment has TI set ... */
+#define SEGMENT_LDT 0x4
+/* ... GDT has it cleared */
+#define SEGMENT_GDT 0x0
-#define SEGMENT_RPL_MASK 0x3 /*
- * Bottom two bits of selector give the ring
- * privilege level
- */
-#define SEGMENT_TI_MASK 0x4 /* Bit 2 is table indicator (LDT/GDT) */
-#define USER_RPL 0x3 /* User mode is privilege level 3 */
-#define SEGMENT_LDT 0x4 /* LDT segment has TI set... */
-#define SEGMENT_GDT 0x0 /* ... GDT has it cleared */
+#define GDT_ENTRY_INVALID_SEG 0
#ifdef CONFIG_X86_32
/*
* The layout of the per-CPU GDT under Linux:
*
- * 0 - null
+ * 0 - null <=== cacheline #1
* 1 - reserved
* 2 - reserved
* 3 - reserved
*
- * 4 - unused <==== new cacheline
+ * 4 - unused <=== cacheline #2
* 5 - unused
*
* ------- start of TLS (Thread-Local Storage) segments:
*
* 6 - TLS segment #1 [ glibc's TLS segment ]
* 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
- * 8 - TLS segment #3
+ * 8 - TLS segment #3 <=== cacheline #3
* 9 - reserved
* 10 - reserved
* 11 - reserved
*
* ------- start of kernel segments:
*
- * 12 - kernel code segment <==== new cacheline
+ * 12 - kernel code segment <=== cacheline #4
* 13 - kernel data segment
* 14 - default user CS
* 15 - default user DS
- * 16 - TSS
+ * 16 - TSS <=== cacheline #5
* 17 - LDT
* 18 - PNPBIOS support (16->32 gate)
* 19 - PNPBIOS support
- * 20 - PNPBIOS support
+ * 20 - PNPBIOS support <=== cacheline #6
* 21 - PNPBIOS support
* 22 - PNPBIOS support
* 23 - APM BIOS support
- * 24 - APM BIOS support
+ * 24 - APM BIOS support <=== cacheline #7
* 25 - APM BIOS support
*
* 26 - ESPFIX small SS
* 27 - per-cpu [ offset to per-cpu data area ]
- * 28 - stack_canary-20 [ for stack protector ]
+ * 28 - stack_canary-20 [ for stack protector ] <=== cacheline #8
* 29 - unused
* 30 - unused
* 31 - TSS for double fault handler
*/
-#define GDT_ENTRY_TLS_MIN 6
-#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+#define GDT_ENTRY_TLS_MIN 6
+#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+#define GDT_ENTRY_KERNEL_CS 12
+#define GDT_ENTRY_KERNEL_DS 13
#define GDT_ENTRY_DEFAULT_USER_CS 14
-
#define GDT_ENTRY_DEFAULT_USER_DS 15
+#define GDT_ENTRY_TSS 16
+#define GDT_ENTRY_LDT 17
+#define GDT_ENTRY_PNPBIOS_CS32 18
+#define GDT_ENTRY_PNPBIOS_CS16 19
+#define GDT_ENTRY_PNPBIOS_DS 20
+#define GDT_ENTRY_PNPBIOS_TS1 21
+#define GDT_ENTRY_PNPBIOS_TS2 22
+#define GDT_ENTRY_APMBIOS_BASE 23
+
+#define GDT_ENTRY_ESPFIX_SS 26
+#define GDT_ENTRY_PERCPU 27
+#define GDT_ENTRY_STACK_CANARY 28
+
+#define GDT_ENTRY_DOUBLEFAULT_TSS 31
-#define GDT_ENTRY_KERNEL_BASE (12)
+/*
+ * Number of entries in the GDT table:
+ */
+#define GDT_ENTRIES 32
-#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
+/*
+ * Segment selector values corresponding to the above entries:
+ */
-#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
+#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS*8)
-#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
-#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE+5)
+/* segment for calling fn: */
+#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32*8)
+/* code segment for BIOS: */
+#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16*8)
-#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE+6)
-#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE+11)
+/* "Is this PNP code selector (PNP_CS32 or PNP_CS16)?" */
+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == PNP_CS32)
-#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE+14)
-#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS*8)
+/* data segment for BIOS: */
+#define PNP_DS (GDT_ENTRY_PNPBIOS_DS*8)
+/* transfer data segment: */
+#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1*8)
+/* another data segment: */
+#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2*8)
-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE+15)
#ifdef CONFIG_SMP
-#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
+# define __KERNEL_PERCPU (GDT_ENTRY_PERCPU*8)
#else
-#define __KERNEL_PERCPU 0
+# define __KERNEL_PERCPU 0
#endif
-#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE+16)
#ifdef CONFIG_CC_STACKPROTECTOR
-#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8)
+# define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8)
#else
-#define __KERNEL_STACK_CANARY 0
+# define __KERNEL_STACK_CANARY 0
#endif
-#define GDT_ENTRY_DOUBLEFAULT_TSS 31
-
-/*
- * The GDT has 32 entries
- */
-#define GDT_ENTRIES 32
+#else /* 64-bit: */
-/* The PnP BIOS entries in the GDT */
-#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
-#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
-#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
-#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
-#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
-
-/* The PnP BIOS selectors */
-#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
-#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
-#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
-#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
-#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
+#include <asm/cache.h>
+#define GDT_ENTRY_KERNEL32_CS 1
+#define GDT_ENTRY_KERNEL_CS 2
+#define GDT_ENTRY_KERNEL_DS 3
/*
- * Matching rules for certain types of segments.
+ * We cannot use the same code segment descriptor for user and kernel mode,
+ * not even in long flat mode, because of different DPL.
+ *
+ * GDT layout to get 64-bit SYSCALL/SYSRET support right. SYSRET hardcodes
+ * selectors:
+ *
+ * if returning to 32-bit userspace: cs = STAR.SYSRET_CS,
+ * if returning to 64-bit userspace: cs = STAR.SYSRET_CS+16,
+ *
+ * ss = STAR.SYSRET_CS+8 (in either case)
+ *
+ * thus USER_DS should be between 32-bit and 64-bit code selectors:
*/
+#define GDT_ENTRY_DEFAULT_USER32_CS 4
+#define GDT_ENTRY_DEFAULT_USER_DS 5
+#define GDT_ENTRY_DEFAULT_USER_CS 6
-/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
-
+/* Needs two entries */
+#define GDT_ENTRY_TSS 8
+/* Needs two entries */
+#define GDT_ENTRY_LDT 10
-#else
-#include <asm/cache.h>
-
-#define GDT_ENTRY_KERNEL32_CS 1
-#define GDT_ENTRY_KERNEL_CS 2
-#define GDT_ENTRY_KERNEL_DS 3
+#define GDT_ENTRY_TLS_MIN 12
+#define GDT_ENTRY_TLS_MAX 14
-#define __KERNEL32_CS (GDT_ENTRY_KERNEL32_CS * 8)
+/* Abused to load per CPU data from limit */
+#define GDT_ENTRY_PER_CPU 15
/*
- * we cannot use the same code segment descriptor for user and kernel
- * -- not even in the long flat mode, because of different DPL /kkeil
- * The segment offset needs to contain a RPL. Grr. -AK
- * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets)
+ * Number of entries in the GDT table:
*/
-#define GDT_ENTRY_DEFAULT_USER32_CS 4
-#define GDT_ENTRY_DEFAULT_USER_DS 5
-#define GDT_ENTRY_DEFAULT_USER_CS 6
-#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
-#define __USER32_DS __USER_DS
-
-#define GDT_ENTRY_TSS 8 /* needs two entries */
-#define GDT_ENTRY_LDT 10 /* needs two entries */
-#define GDT_ENTRY_TLS_MIN 12
-#define GDT_ENTRY_TLS_MAX 14
-
-#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
-#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
+#define GDT_ENTRIES 16
-/* TLS indexes for 64bit - hardcoded in arch_prctl */
-#define FS_TLS 0
-#define GS_TLS 1
-
-#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
-#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
-
-#define GDT_ENTRIES 16
+/*
+ * Segment selector values corresponding to the above entries:
+ *
+ * Note, selectors also need to have a correct RPL,
+ * expressed with the +3 value for user-space selectors:
+ */
+#define __KERNEL32_CS (GDT_ENTRY_KERNEL32_CS*8)
+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
+#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8 + 3)
+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
+#define __USER32_DS __USER_DS
+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
+#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU*8 + 3)
+
+/* TLS indexes for 64-bit - hardcoded in arch_prctl(): */
+#define FS_TLS 0
+#define GS_TLS 1
+
+#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
+#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
#endif
-#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
-#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
-#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
-#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
#ifndef CONFIG_PARAVIRT
-#define get_kernel_rpl() 0
+# define get_kernel_rpl() 0
#endif
-#define IDT_ENTRIES 256
-#define NUM_EXCEPTION_VECTORS 32
-/* Bitmask of exception vectors which push an error code on the stack */
-#define EXCEPTION_ERRCODE_MASK 0x00027d00
-#define GDT_SIZE (GDT_ENTRIES * 8)
-#define GDT_ENTRY_TLS_ENTRIES 3
-#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+#define IDT_ENTRIES 256
+#define NUM_EXCEPTION_VECTORS 32
+
+/* Bitmask of exception vectors which push an error code on the stack: */
+#define EXCEPTION_ERRCODE_MASK 0x00027d00
+
+#define GDT_SIZE (GDT_ENTRIES*8)
+#define GDT_ENTRY_TLS_ENTRIES 3
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES*8)
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
+
extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
#ifdef CONFIG_TRACING
-#define trace_early_idt_handlers early_idt_handlers
+# define trace_early_idt_handlers early_idt_handlers
#endif
/*
@@ -228,37 +260,30 @@ do { \
} while (0)
/*
- * Save a segment register away
+ * Save a segment register away:
*/
#define savesegment(seg, value) \
asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
/*
- * x86_32 user gs accessors.
+ * x86-32 user GS accessors:
*/
#ifdef CONFIG_X86_32
-#ifdef CONFIG_X86_32_LAZY_GS
-#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
-#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
-#define task_user_gs(tsk) ((tsk)->thread.gs)
-#define lazy_save_gs(v) savesegment(gs, (v))
-#define lazy_load_gs(v) loadsegment(gs, (v))
-#else /* X86_32_LAZY_GS */
-#define get_user_gs(regs) (u16)((regs)->gs)
-#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
-#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
-#define lazy_save_gs(v) do { } while (0)
-#define lazy_load_gs(v) do { } while (0)
-#endif /* X86_32_LAZY_GS */
+# ifdef CONFIG_X86_32_LAZY_GS
+# define get_user_gs(regs) (u16)({ unsigned long v; savesegment(gs, v); v; })
+# define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
+# define task_user_gs(tsk) ((tsk)->thread.gs)
+# define lazy_save_gs(v) savesegment(gs, (v))
+# define lazy_load_gs(v) loadsegment(gs, (v))
+# else /* X86_32_LAZY_GS */
+# define get_user_gs(regs) (u16)((regs)->gs)
+# define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
+# define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
+# define lazy_save_gs(v) do { } while (0)
+# define lazy_load_gs(v) do { } while (0)
+# endif /* X86_32_LAZY_GS */
#endif /* X86_32 */
-static inline unsigned long get_limit(unsigned long segment)
-{
- unsigned long __limit;
- asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
- return __limit + 1;
-}
-
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
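The SYSRET ordering constraint described in the 64-bit GDT comment can be checked mechanically: with selector = index*8 + RPL 3, __USER32_CS = 4*8+3 = 35, __USER_DS = 5*8+3 = 43 and __USER_CS = 6*8+3 = 51. A hedged compile-time sanity sketch (not part of the patch):

#include <linux/bug.h>

static void __init sysret_layout_check_sketch(void)
{
	/* SYSRET hardcodes SS = STAR.SYSRET_CS + 8 ... */
	BUILD_BUG_ON(__USER_DS != __USER32_CS + 8);
	/* ... and the 64-bit return CS = STAR.SYSRET_CS + 16. */
	BUILD_BUG_ON(__USER_CS != __USER32_CS + 16);
}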
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ff4e7b2..f69e06b 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -66,6 +66,11 @@ static inline void x86_ce4100_early_setup(void) { }
*/
extern struct boot_params boot_params;
+static inline bool kaslr_enabled(void)
+{
+ return !!(boot_params.hdr.loadflags & KASLR_FLAG);
+}
+
/*
* Do NOT EVER look at the BIOS memory size location.
* It does not work on many machines.
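kaslr_enabled() now reflects what the boot stage actually did: the decompressor sets KASLR_FLAG in boot_params.hdr.loadflags only when it really randomized the base. A hypothetical caller sketch:

static void __init kaslr_report_sketch(void)
{
	if (kaslr_enabled())
		pr_info("KASLR: base address randomized by the decompressor\n");
	else
		pr_info("KASLR: inactive (disabled, or not applied at boot)\n");
}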
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 9dfce4e..6fe6b18 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -57,9 +57,9 @@ struct sigcontext {
unsigned long ip;
unsigned long flags;
unsigned short cs;
- unsigned short gs;
- unsigned short fs;
- unsigned short __pad0;
+ unsigned short __pad2; /* Was called gs, but was always zero. */
+ unsigned short __pad1; /* Was called fs, but was always zero. */
+ unsigned short ss;
unsigned long err;
unsigned long trapno;
unsigned long oldmask;
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
index 7a95816..89db467 100644
--- a/arch/x86/include/asm/sighandling.h
+++ b/arch/x86/include/asm/sighandling.h
@@ -13,9 +13,7 @@
X86_EFLAGS_CF | X86_EFLAGS_RF)
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
-
-int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
- unsigned long *pax);
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc);
int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
struct pt_regs *regs, unsigned long mask);
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 8d3120f..ba665eb 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -27,23 +27,11 @@
#ifdef CONFIG_X86_SMAP
-#define ASM_CLAC \
- 661: ASM_NOP3 ; \
- .pushsection .altinstr_replacement, "ax" ; \
- 662: __ASM_CLAC ; \
- .popsection ; \
- .pushsection .altinstructions, "a" ; \
- altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
- .popsection
-
-#define ASM_STAC \
- 661: ASM_NOP3 ; \
- .pushsection .altinstr_replacement, "ax" ; \
- 662: __ASM_STAC ; \
- .popsection ; \
- .pushsection .altinstructions, "a" ; \
- altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
- .popsection
+#define ASM_CLAC \
+ ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP
+
+#define ASM_STAC \
+ ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP
#else /* CONFIG_X86_SMAP */
@@ -61,20 +49,20 @@
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+ alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+ alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
}
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+ ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+ ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP)
#else /* CONFIG_X86_SMAP */
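The rewritten ASM_CLAC/ASM_STAC rely on the instruction-padding support added to the alternatives framework in this series: an empty old instruction is legal, and the patch site is padded automatically. A hedged illustration of what ends up in the instruction stream (CLAC/STAC byte values per the SDM; the exact NOP encoding is chosen at patch time):

/*
 *   CPU without SMAP:  0f 1f 00   - e.g. a 3-byte NOP fills the pad
 *   CPU with SMAP:     0f 01 ca   - CLAC
 *                      0f 01 cb   - STAC
 *
 * so callers no longer hand-roll .altinstructions entries around an
 * explicit ASM_NOP3 placeholder.
 */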
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 8cd1cc3..17a8dce 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -150,12 +150,13 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
}
void cpu_disable_common(void);
-void cpu_die_common(unsigned int cpu);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
+void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
+int common_cpu_die(unsigned int cpu);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
void play_dead_common(void);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 6a4b00f..aeb4666e 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -4,6 +4,8 @@
#ifdef __KERNEL__
+#include <asm/nops.h>
+
static inline void native_clts(void)
{
asm volatile("clts");
@@ -199,6 +201,28 @@ static inline void clflushopt(volatile void *__p)
"+m" (*(volatile char __force *)__p));
}
+static inline void clwb(volatile void *__p)
+{
+ volatile struct { char x[64]; } *p = __p;
+
+ asm volatile(ALTERNATIVE_2(
+ ".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])",
+ ".byte 0x66; clflush (%[pax])", /* clflushopt (%%rax) */
+ X86_FEATURE_CLFLUSHOPT,
+ ".byte 0x66, 0x0f, 0xae, 0x30", /* clwb (%%rax) */
+ X86_FEATURE_CLWB)
+ : [p] "+m" (*p)
+ : [pax] "a" (p));
+}
+
+static inline void pcommit_sfence(void)
+{
+ alternative(ASM_NOP7,
+ ".byte 0x66, 0x0f, 0xae, 0xf8\n\t" /* pcommit */
+ "sfence",
+ X86_FEATURE_PCOMMIT);
+}
+
#define nop() asm volatile ("nop")
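clwb() and pcommit_sfence() together implement the persistent-memory durability sequence: write dirty lines back (without evicting them, unlike CLFLUSH), then fence and commit. A minimal sketch, assuming 'addr'/'size' describe a pmem buffer; boot_cpu_data.x86_clflush_size supplies the cache-line stride:

static void pmem_flush_sketch(void *addr, size_t size)
{
	unsigned long stride = boot_cpu_data.x86_clflush_size;
	char *p = (char *)((unsigned long)addr & ~(stride - 1));
	char *end = (char *)addr + size;

	for (; p < end; p += stride)
		clwb(p);		/* write back, line stays cached */

	pcommit_sfence();		/* pcommit + sfence: make it durable */
}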
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 1d4e4f2..ea2dbe8 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -13,6 +13,33 @@
#include <asm/types.h>
/*
+ * TOP_OF_KERNEL_STACK_PADDING is a number of unused bytes that we
+ * reserve at the top of the kernel stack. We do it because of a nasty
+ * 32-bit corner case. On x86_32, the hardware stack frame is
+ * variable-length. Except for vm86 mode, struct pt_regs assumes a
+ * maximum-length frame. If we enter from CPL 0, the top 8 bytes of
+ * pt_regs don't actually exist. Ordinarily this doesn't matter, but it
+ * does in at least one case:
+ *
+ * If we take an NMI early enough in SYSENTER, then we can end up with
+ * pt_regs that extends above sp0. On the way out, in the espfix code,
+ * we can read the saved SS value, but that value will be above sp0.
+ * Without this offset, that can result in a page fault. (We are
+ * careful that, in this case, the value we read doesn't matter.)
+ *
+ * In vm86 mode, the hardware frame is much longer still, but we neither
+ * access the extra members from NMI context, nor do we write such a
+ * frame at sp0 at all.
+ *
+ * x86_64 has a fixed-length stack frame.
+ */
+#ifdef CONFIG_X86_32
+# define TOP_OF_KERNEL_STACK_PADDING 8
+#else
+# define TOP_OF_KERNEL_STACK_PADDING 0
+#endif
+
+/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
* - this struct shares the supervisor stack pages
@@ -145,7 +172,6 @@ struct thread_info {
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
#define STACK_WARN (THREAD_SIZE/8)
-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
/*
* macros/functions for gaining access to the thread information structure
@@ -158,10 +184,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack);
static inline struct thread_info *current_thread_info(void)
{
- struct thread_info *ti;
- ti = (void *)(this_cpu_read_stable(kernel_stack) +
- KERNEL_STACK_OFFSET - THREAD_SIZE);
- return ti;
+ return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
}
static inline unsigned long current_stack_pointer(void)
@@ -177,16 +200,37 @@ static inline unsigned long current_stack_pointer(void)
#else /* !__ASSEMBLY__ */
-/* how to get the thread information struct from ASM */
+/* Load thread_info address into "reg" */
#define GET_THREAD_INFO(reg) \
_ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
+ _ASM_SUB $(THREAD_SIZE),reg ;
/*
- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
- * a certain register (to be used in assembler memory operands).
+ * ASM operand which evaluates to a 'thread_info' address of
+ * the current task, if it is known that "reg" is currently
+ * exactly "off" bytes below the top of the stack.
+ *
+ * ( The kernel stack's size is known at build time, it is usually
+ * 2 or 4 pages, and the bottom of the kernel stack contains
+ * the thread_info structure. So to access the thread_info very
+ * quickly from assembly code we can calculate down from the
+ * top of the kernel stack to the bottom, using constant,
+ * build-time calculations only. )
+ *
+ * For example, to fetch the current thread_info->flags value into %eax
+ * on x86-64 defconfig kernels, in syscall entry code where RSP is
+ * currently at exactly SIZEOF_PTREGS bytes away from the top of the
+ * stack:
+ *
+ * mov ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS), %eax
+ *
+ * will translate to:
+ *
+ * 8b 84 24 b8 c0 ff ff mov -0x3f48(%rsp), %eax
+ *
+ * which is below the current RSP by almost 16K.
*/
-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
+#define ASM_THREAD_INFO(field, reg, off) ((field)+(off)-THREAD_SIZE)(reg)
#endif
@@ -236,6 +280,16 @@ static inline bool is_ia32_task(void)
#endif
return false;
}
+
+/*
+ * Force syscall return via IRET by making it look as if there was
+ * some work pending. IRET is our most capable (but slowest) syscall
+ * return path, which is able to restore modified SS, CS and certain
+ * EFLAGS values that other (fast) syscall return instructions
+ * are not able to restore properly.
+ */
+#define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
+
#endif /* !__ASSEMBLY__ */
#ifndef __ASSEMBLY__
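current_thread_info() now needs nothing but the top-of-stack value. A worked sketch with illustrative 64-bit addresses, assuming THREAD_SIZE = 16384 (0x4000):

/*
 *   current_top_of_stack()  ->  0xffff880036410000   (cpu_tss sp0)
 *   - THREAD_SIZE               0xffff88003640c000   (thread_info)
 *
 * i.e. thread_info sits at the bottom of the current kernel stack;
 * the old KERNEL_STACK_OFFSET fudge factor is gone entirely.
 */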
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 12a26b9..f2f9b39 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -231,6 +231,6 @@ __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
}
unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+copy_user_handle_tail(char *to, char *from, unsigned len);
#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 44e6dd7..ab456dc 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -7,7 +7,6 @@
#define SETUP_DTB 2
#define SETUP_PCI 3
#define SETUP_EFI 4
-#define SETUP_KASLR 5
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
@@ -16,6 +15,7 @@
/* loadflags */
#define LOADED_HIGH (1<<0)
+#define KASLR_FLAG (1<<1)
#define QUIET_FLAG (1<<5)
#define KEEP_SEGMENTS (1<<6)
#define CAN_USE_HEAP (1<<7)
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 3ce0791..1a4eae6 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -74,6 +74,24 @@
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
+#define MSR_IA32_RTIT_CTL 0x00000570
+#define RTIT_CTL_TRACEEN BIT(0)
+#define RTIT_CTL_OS BIT(2)
+#define RTIT_CTL_USR BIT(3)
+#define RTIT_CTL_CR3EN BIT(7)
+#define RTIT_CTL_TOPA BIT(8)
+#define RTIT_CTL_TSC_EN BIT(10)
+#define RTIT_CTL_DISRETC BIT(11)
+#define RTIT_CTL_BRANCH_EN BIT(13)
+#define MSR_IA32_RTIT_STATUS 0x00000571
+#define RTIT_STATUS_CONTEXTEN BIT(1)
+#define RTIT_STATUS_TRIGGEREN BIT(2)
+#define RTIT_STATUS_ERROR BIT(4)
+#define RTIT_STATUS_STOPPED BIT(5)
+#define MSR_IA32_RTIT_CR3_MATCH 0x00000572
+#define MSR_IA32_RTIT_OUTPUT_BASE 0x00000560
+#define MSR_IA32_RTIT_OUTPUT_MASK 0x00000561
+
#define MSR_MTRRfix64K_00000 0x00000250
#define MSR_MTRRfix16K_80000 0x00000258
#define MSR_MTRRfix16K_A0000 0x00000259
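The new RTIT_* definitions are enough to express the basic Processor Trace start sequence. A hedged sketch (real code must first validate the PT CPUID leaf and build a valid ToPA table; 'topa_phys' is hypothetical):

static void pt_start_sketch(u64 topa_phys)
{
	u64 ctl = RTIT_CTL_TRACEEN | RTIT_CTL_TOPA |
		  RTIT_CTL_TSC_EN | RTIT_CTL_BRANCH_EN |
		  RTIT_CTL_OS | RTIT_CTL_USR;

	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, topa_phys);
	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, 0);
	wrmsrl(MSR_IA32_RTIT_CTL, ctl);
}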
diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
index 7b0a55a..580aee3 100644
--- a/arch/x86/include/uapi/asm/ptrace-abi.h
+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
@@ -25,13 +25,17 @@
#else /* __i386__ */
#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
+/*
+ * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
+ * unless a syscall needs a complete, fully filled "struct pt_regs".
+ */
#define R15 0
#define R14 8
#define R13 16
#define R12 24
#define RBP 32
#define RBX 40
-/* arguments: interrupts/non tracing syscalls only save up to here*/
+/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11 48
#define R10 56
#define R9 64
@@ -41,15 +45,17 @@
#define RDX 96
#define RSI 104
#define RDI 112
-#define ORIG_RAX 120 /* = ERROR */
-/* end of arguments */
-/* cpu exception frame or undefined in case of fast syscall. */
+/*
+ * On syscall entry, this is syscall#. On CPU exception, this is error code.
+ * On hw interrupt, it's IRQ number:
+ */
+#define ORIG_RAX 120
+/* Return frame for iretq */
#define RIP 128
#define CS 136
#define EFLAGS 144
#define RSP 152
#define SS 160
-#define ARGOFFSET R11
#endif /* __ASSEMBLY__ */
/* top of stack page */
diff --git a/arch/x86/include/uapi/asm/ptrace.h b/arch/x86/include/uapi/asm/ptrace.h
index ac4b9aa..bc16115 100644
--- a/arch/x86/include/uapi/asm/ptrace.h
+++ b/arch/x86/include/uapi/asm/ptrace.h
@@ -41,13 +41,17 @@ struct pt_regs {
#ifndef __KERNEL__
struct pt_regs {
+/*
+ * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
+ * unless a syscall needs a complete, fully filled "struct pt_regs".
+ */
unsigned long r15;
unsigned long r14;
unsigned long r13;
unsigned long r12;
unsigned long rbp;
unsigned long rbx;
-/* arguments: non interrupts/non tracing syscalls only save up to here*/
+/* These regs are callee-clobbered. Always saved on kernel entry. */
unsigned long r11;
unsigned long r10;
unsigned long r9;
@@ -57,9 +61,12 @@ struct pt_regs {
unsigned long rdx;
unsigned long rsi;
unsigned long rdi;
+/*
+ * On syscall entry, this is syscall#. On CPU exception, this is error code.
+ * On hw interrupt, it's IRQ number:
+ */
unsigned long orig_rax;
-/* end of arguments */
-/* cpu exception frame or undefined */
+/* Return frame for iretq */
unsigned long rip;
unsigned long cs;
unsigned long eflags;
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index d8b9f90..16dc4e8 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -177,9 +177,24 @@ struct sigcontext {
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
- __u16 gs;
- __u16 fs;
- __u16 __pad0;
+
+ /*
+ * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
+ * Linux saved and restored fs and gs in these slots. This
+ * was counterproductive, as fsbase and gsbase were never
+ * saved, so arch_prctl was presumably unreliable.
+ *
+ * If these slots are ever needed for any other purpose, there
+ * is some risk that very old 64-bit binaries could get
+ * confused. I doubt that many such binaries still work,
+ * though, since the same patch in 2.5.64 also removed the
+ * 64-bit set_thread_area syscall, so it appears that there is
+ * no TLS API that works in both pre- and post-2.5.64 kernels.
+ */
+ __u16 __pad2; /* Was gs. */
+ __u16 __pad1; /* Was fs. */
+
+ __u16 ss;
__u64 err;
__u64 trapno;
__u64 oldmask;
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index c5f1a1d..1fe9218 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -67,6 +67,7 @@
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_INVEPT 50
+#define EXIT_REASON_RDTSCP 51
#define EXIT_REASON_PREEMPTION_TIMER 52
#define EXIT_REASON_INVVPID 53
#define EXIT_REASON_WBINVD 54
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index cdb1b70..c887cd9 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_X86_32) += i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += mcount_64.o
obj-y += syscall_$(BITS).o vsyscall_gtod.o
+obj-$(CONFIG_IA32_EMULATION) += syscall_32.o
obj-$(CONFIG_X86_VSYSCALL_EMULATION) += vsyscall_64.o vsyscall_emu_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-$(CONFIG_SYSFS) += ksysfs.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 3d525c6..803b684 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1338,6 +1338,26 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
}
/*
+ * ACPI offers an alternative platform interface model that removes
+ * ACPI hardware requirements for platforms that do not implement
+ * the PC Architecture.
+ *
+ * We initialize the Hardware-reduced ACPI model here:
+ */
+static void __init acpi_reduced_hw_init(void)
+{
+ if (acpi_gbl_reduced_hardware) {
+ /*
+ * Override x86_init functions and bypass legacy pic
+ * in Hardware-reduced ACPI mode
+ */
+ x86_init.timers.timer_init = x86_init_noop;
+ x86_init.irqs.pre_vector_init = x86_init_noop;
+ legacy_pic = &null_legacy_pic;
+ }
+}
+
+/*
* If your system is blacklisted here, but you find that acpi=force
* works for you, please contact linux-acpi@vger.kernel.org
*/
@@ -1536,6 +1556,11 @@ int __init early_acpi_boot_init(void)
*/
early_acpi_process_madt();
+ /*
+ * Hardware-reduced ACPI mode initialization:
+ */
+ acpi_reduced_hw_init();
+
return 0;
}
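The override pattern in acpi_reduced_hw_init() generalizes: hardware-reduced platforms stub out hooks for hardware they do not have before those hooks ever run. The same shape, as a hypothetical platform quirk:

static void __init reduced_hw_quirk_sketch(void)
{
	/* No PIT and no legacy PIC on this (hypothetical) platform. */
	x86_init.timers.timer_init    = x86_init_noop;
	x86_init.irqs.pre_vector_init = x86_init_noop;
	legacy_pic = &null_legacy_pic;
}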
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 703130f..aef6531 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -52,10 +52,25 @@ static int __init setup_noreplace_paravirt(char *str)
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
-#define DPRINTK(fmt, ...) \
-do { \
- if (debug_alternative) \
- printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+#define DPRINTK(fmt, args...) \
+do { \
+ if (debug_alternative) \
+ printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args); \
+} while (0)
+
+#define DUMP_BYTES(buf, len, fmt, args...) \
+do { \
+ if (unlikely(debug_alternative)) { \
+ int j; \
+ \
+ if (!(len)) \
+ break; \
+ \
+ printk(KERN_DEBUG fmt, ##args); \
+ for (j = 0; j < (len) - 1; j++) \
+ printk(KERN_CONT "%02hhx ", buf[j]); \
+ printk(KERN_CONT "%02hhx\n", buf[j]); \
+ } \
} while (0)
/*
@@ -243,12 +258,89 @@ extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);
-/* Replace instructions with better alternatives for this CPU type.
- This runs before SMP is initialized to avoid SMP problems with
- self modifying code. This implies that asymmetric systems where
- APs have less capabilities than the boot processor are not handled.
- Tough. Make sure you disable such features by hand. */
+/*
+ * Are we looking at a near JMP with a 1- or 4-byte displacement?
+ */
+static inline bool is_jmp(const u8 opcode)
+{
+ return opcode == 0xeb || opcode == 0xe9;
+}
+
+static void __init_or_module
+recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
+{
+ u8 *next_rip, *tgt_rip;
+ s32 n_dspl, o_dspl;
+ int repl_len;
+
+ if (a->replacementlen != 5)
+ return;
+
+ o_dspl = *(s32 *)(insnbuf + 1);
+
+ /* next_rip of the replacement JMP */
+ next_rip = repl_insn + a->replacementlen;
+ /* target rip of the replacement JMP */
+ tgt_rip = next_rip + o_dspl;
+ n_dspl = tgt_rip - orig_insn;
+
+ DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);
+
+ if (tgt_rip - orig_insn >= 0) {
+ if (n_dspl - 2 <= 127)
+ goto two_byte_jmp;
+ else
+ goto five_byte_jmp;
+ /* negative offset */
+ } else {
+ if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
+ goto two_byte_jmp;
+ else
+ goto five_byte_jmp;
+ }
+
+two_byte_jmp:
+ n_dspl -= 2;
+
+ insnbuf[0] = 0xeb;
+ insnbuf[1] = (s8)n_dspl;
+ add_nops(insnbuf + 2, 3);
+
+ repl_len = 2;
+ goto done;
+
+five_byte_jmp:
+ n_dspl -= 5;
+
+ insnbuf[0] = 0xe9;
+ *(s32 *)&insnbuf[1] = n_dspl;
+ repl_len = 5;
+
+done:
+
+ DPRINTK("final displ: 0x%08x, JMP 0x%lx",
+ n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
+}
+
+static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
+{
+ if (instr[0] != 0x90)
+ return;
+
+ add_nops(instr + (a->instrlen - a->padlen), a->padlen);
+
+ DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
+ instr, a->instrlen - a->padlen, a->padlen);
+}
+
+/*
+ * Replace instructions with better alternatives for this CPU type. This runs
+ * before SMP is initialized to avoid SMP problems with self-modifying code.
+ * This implies that asymmetric systems where APs have less capabilities than
+ * the boot processor are not handled. Tough. Make sure you disable such
+ * features by hand.
+ */
void __init_or_module apply_alternatives(struct alt_instr *start,
struct alt_instr *end)
{
@@ -256,10 +348,10 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
u8 *instr, *replacement;
u8 insnbuf[MAX_PATCH_LEN];
- DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
+ DPRINTK("alt table %p -> %p", start, end);
/*
* The scan order should be from start to end. A later scanned
- * alternative code can overwrite a previous scanned alternative code.
+ * alternative code can overwrite previously scanned alternative code.
* Some kernel functions (e.g. memcpy, memset, etc) use this order to
* patch code.
*
@@ -267,29 +359,54 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
* order.
*/
for (a = start; a < end; a++) {
+ int insnbuf_sz = 0;
+
instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
- BUG_ON(a->replacementlen > a->instrlen);
BUG_ON(a->instrlen > sizeof(insnbuf));
BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
- if (!boot_cpu_has(a->cpuid))
+ if (!boot_cpu_has(a->cpuid)) {
+ if (a->padlen > 1)
+ optimize_nops(a, instr);
+
continue;
+ }
+
+ DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
+ a->cpuid >> 5,
+ a->cpuid & 0x1f,
+ instr, a->instrlen,
+ replacement, a->replacementlen, a->padlen);
+
+ DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
+ DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
memcpy(insnbuf, replacement, a->replacementlen);
+ insnbuf_sz = a->replacementlen;
/* 0xe8 is a relative jump; fix the offset. */
- if (*insnbuf == 0xe8 && a->replacementlen == 5)
- *(s32 *)(insnbuf + 1) += replacement - instr;
+ if (*insnbuf == 0xe8 && a->replacementlen == 5) {
+ *(s32 *)(insnbuf + 1) += replacement - instr;
+ DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
+ *(s32 *)(insnbuf + 1),
+ (unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
+ }
+
+ if (a->replacementlen && is_jmp(replacement[0]))
+ recompute_jump(a, instr, replacement, insnbuf);
- add_nops(insnbuf + a->replacementlen,
- a->instrlen - a->replacementlen);
+ if (a->instrlen > a->replacementlen) {
+ add_nops(insnbuf + a->replacementlen,
+ a->instrlen - a->replacementlen);
+ insnbuf_sz += a->instrlen - a->replacementlen;
+ }
+ DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
- text_poke_early(instr, insnbuf, a->instrlen);
+ text_poke_early(instr, insnbuf, insnbuf_sz);
}
}
#ifdef CONFIG_SMP
-
static void alternatives_smp_lock(const s32 *start, const s32 *end,
u8 *text, u8 *text_end)
{
@@ -371,8 +488,8 @@ void __init_or_module alternatives_smp_module_add(struct module *mod,
smp->locks_end = locks_end;
smp->text = text;
smp->text_end = text_end;
- DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
- __func__, smp->locks, smp->locks_end,
+ DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
+ smp->locks, smp->locks_end,
smp->text, smp->text_end, smp->name);
list_add_tail(&smp->next, &smp_alt_modules);
@@ -440,7 +557,7 @@ int alternatives_text_reserved(void *start, void *end)
return 0;
}
-#endif
+#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
@@ -601,7 +718,7 @@ int poke_int3_handler(struct pt_regs *regs)
if (likely(!bp_patching_in_progress))
return 0;
- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
return 0;
/* set up the specified breakpoint handler */
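recompute_jump() above decides between a 2-byte and a 5-byte JMP after retargeting the displacement. A worked example with illustrative addresses:

/*
 *   orig_insn (patch site)   0xffffffff81000100
 *   tgt_rip   (new target)   0xffffffff81000110
 *   n_dspl = tgt_rip - orig_insn = 0x10
 *
 * 0x10 - 2 fits in a signed byte, so the two_byte_jmp path emits
 * "eb 0e" (JMP rel8) and pads the remaining 3 bytes with NOPs,
 * instead of the five-byte "e9 0b 00 00 00" (JMP rel32).
 */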
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ad3639a..dcb5285 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1084,67 +1084,6 @@ void lapic_shutdown(void)
local_irq_restore(flags);
}
-/*
- * This is to verify that we're looking at a real local APIC.
- * Check these against your board if the CPUs aren't getting
- * started for no apparent reason.
- */
-int __init verify_local_APIC(void)
-{
- unsigned int reg0, reg1;
-
- /*
- * The version register is read-only in a real APIC.
- */
- reg0 = apic_read(APIC_LVR);
- apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
- apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
- reg1 = apic_read(APIC_LVR);
- apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
-
- /*
- * The two version reads above should print the same
- * numbers. If the second one is different, then we
- * poke at a non-APIC.
- */
- if (reg1 != reg0)
- return 0;
-
- /*
- * Check if the version looks reasonably.
- */
- reg1 = GET_APIC_VERSION(reg0);
- if (reg1 == 0x00 || reg1 == 0xff)
- return 0;
- reg1 = lapic_get_maxlvt();
- if (reg1 < 0x02 || reg1 == 0xff)
- return 0;
-
- /*
- * The ID register is read/write in a real APIC.
- */
- reg0 = apic_read(APIC_ID);
- apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
- apic_write(APIC_ID, reg0 ^ apic->apic_id_mask);
- reg1 = apic_read(APIC_ID);
- apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
- apic_write(APIC_ID, reg0);
- if (reg1 != (reg0 ^ apic->apic_id_mask))
- return 0;
-
- /*
- * The next two are just to see if we have sane values.
- * They're only really relevant if we're in Virtual Wire
- * compatibility mode, but most boxes are anymore.
- */
- reg0 = apic_read(APIC_LVT0);
- apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
- reg1 = apic_read(APIC_LVT1);
- apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
-
- return 1;
-}
-
/**
* sync_Arb_IDs - synchronize APIC bus arbitration IDs
*/
@@ -2283,7 +2222,6 @@ int __init APIC_init_uniprocessor(void)
disable_ioapic_support();
default_setup_apic_routing();
- verify_local_APIC();
apic_bsp_setup(true);
return 0;
}
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index c2fd21f..017149c 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -37,10 +37,12 @@ static const struct apic apic_numachip;
static unsigned int get_apic_id(unsigned long x)
{
unsigned long value;
- unsigned int id;
+ unsigned int id = (x >> 24) & 0xff;
- rdmsrl(MSR_FAM10H_NODE_ID, value);
- id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U);
+ if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+ rdmsrl(MSR_FAM10H_NODE_ID, value);
+ id |= (value << 2) & 0xff00;
+ }
return id;
}
@@ -155,10 +157,18 @@ static int __init numachip_probe(void)
static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
{
- if (c->phys_proc_id != node) {
- c->phys_proc_id = node;
- per_cpu(cpu_llc_id, smp_processor_id()) = node;
+ u64 val;
+ u32 nodes = 1;
+
+ this_cpu_write(cpu_llc_id, node);
+
+ /* Account for nodes per socket in multi-core-module processors */
+ if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+ rdmsrl(MSR_FAM10H_NODE_ID, val);
+ nodes = ((val >> 3) & 7) + 1;
}
+
+ c->phys_proc_id = node / nodes;
}
static int __init numachip_system_init(void)
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index e658f21..d9d0bd2 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -135,12 +135,12 @@ static void init_x2apic_ldr(void)
per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);
- __cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
+ cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
for_each_online_cpu(cpu) {
if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
continue;
- __cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu));
- __cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu));
+ cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
+ cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
}
}
@@ -195,7 +195,7 @@ static int x2apic_init_cpu_notifier(void)
BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));
- __cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
+ cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
register_hotcpu_notifier(&x2apic_cpu_notifier);
return 1;
}
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 8e9dcfd..c8d9295 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -144,33 +144,60 @@ static void __init uv_set_apicid_hibit(void)
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
- int pnodeid, is_uv1, is_uv2, is_uv3;
-
- is_uv1 = !strcmp(oem_id, "SGI");
- is_uv2 = !strcmp(oem_id, "SGI2");
- is_uv3 = !strncmp(oem_id, "SGI3", 4); /* there are varieties of UV3 */
- if (is_uv1 || is_uv2 || is_uv3) {
- uv_hub_info->hub_revision =
- (is_uv1 ? UV1_HUB_REVISION_BASE :
- (is_uv2 ? UV2_HUB_REVISION_BASE :
- UV3_HUB_REVISION_BASE));
- pnodeid = early_get_pnodeid();
- early_get_apic_pnode_shift();
- x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
- x86_platform.nmi_init = uv_nmi_init;
- if (!strcmp(oem_table_id, "UVL"))
- uv_system_type = UV_LEGACY_APIC;
- else if (!strcmp(oem_table_id, "UVX"))
- uv_system_type = UV_X2APIC;
- else if (!strcmp(oem_table_id, "UVH")) {
- __this_cpu_write(x2apic_extra_bits,
- pnodeid << uvh_apicid.s.pnode_shift);
- uv_system_type = UV_NON_UNIQUE_APIC;
- uv_set_apicid_hibit();
- return 1;
- }
+ int pnodeid;
+ int uv_apic;
+
+ if (strncmp(oem_id, "SGI", 3) != 0)
+ return 0;
+
+ /*
+ * Determine UV arch type.
+ * SGI: UV100/1000
+ * SGI2: UV2000/3000
+ * SGI3: UV300 (truncated to 4 chars because of different varieties)
+ */
+ uv_hub_info->hub_revision =
+ !strncmp(oem_id, "SGI3", 4) ? UV3_HUB_REVISION_BASE :
+ !strcmp(oem_id, "SGI2") ? UV2_HUB_REVISION_BASE :
+ !strcmp(oem_id, "SGI") ? UV1_HUB_REVISION_BASE : 0;
+
+ if (uv_hub_info->hub_revision == 0)
+ goto badbios;
+
+ pnodeid = early_get_pnodeid();
+ early_get_apic_pnode_shift();
+ x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
+ x86_platform.nmi_init = uv_nmi_init;
+
+ if (!strcmp(oem_table_id, "UVX")) { /* most common */
+ uv_system_type = UV_X2APIC;
+ uv_apic = 0;
+
+ } else if (!strcmp(oem_table_id, "UVH")) { /* only UV1 systems */
+ uv_system_type = UV_NON_UNIQUE_APIC;
+ __this_cpu_write(x2apic_extra_bits,
+ pnodeid << uvh_apicid.s.pnode_shift);
+ uv_set_apicid_hibit();
+ uv_apic = 1;
+
+ } else if (!strcmp(oem_table_id, "UVL")) { /* only used for */
+ uv_system_type = UV_LEGACY_APIC; /* very small systems */
+ uv_apic = 0;
+
+ } else {
+ goto badbios;
}
- return 0;
+
+ pr_info("UV: OEM IDs %s/%s, System/HUB Types %d/%d, uv_apic %d\n",
+ oem_id, oem_table_id, uv_system_type,
+ uv_min_hub_revision_id, uv_apic);
+
+ return uv_apic;
+
+badbios:
+ pr_err("UV: OEM_ID:%s OEM_TABLE_ID:%s\n", oem_id, oem_table_id);
+ pr_err("Current BIOS not supported, update kernel and/or BIOS\n");
+ BUG();
}
enum uv_system_type get_uv_system_type(void)
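
The rewritten probe collapses the three OEM-id checks into one conditional-expression cascade; the SGI3 case is matched by prefix because UV300 table ids carry variety suffixes. The same shape in isolation (the REV_* constants are made up):

#include <stdio.h>
#include <string.h>

enum { REV_NONE = 0, REV_UV1 = 1, REV_UV2 = 3, REV_UV3 = 5 };  /* illustrative values */

static int hub_revision(const char *oem_id)
{
	return !strncmp(oem_id, "SGI3", 4) ? REV_UV3 :  /* prefix: "SGI3x" varieties */
	       !strcmp(oem_id, "SGI2")     ? REV_UV2 :
	       !strcmp(oem_id, "SGI")      ? REV_UV1 : REV_NONE;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       hub_revision("SGI"), hub_revision("SGI2"),
	       hub_revision("SGI3X"), hub_revision("ACME"));
	return 0;
}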
@@ -854,10 +881,14 @@ void __init uv_system_init(void)
unsigned long mmr_base, present, paddr;
unsigned short pnode_mask;
unsigned char n_lshift;
- char *hub = (is_uv1_hub() ? "UV1" :
- (is_uv2_hub() ? "UV2" :
- "UV3"));
+ char *hub = (is_uv1_hub() ? "UV100/1000" :
+ (is_uv2_hub() ? "UV2000/3000" :
+ (is_uv3_hub() ? "UV300" : NULL)));
+ if (!hub) {
+ pr_err("UV: Unknown/unsupported UV hub\n");
+ return;
+ }
pr_info("UV: Found %s hub\n", hub);
map_low_mmrs();
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 3b3b9d3..47703ae 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -68,7 +68,7 @@ void foo(void)
/* Offset from the sysenter stack to tss.sp0 */
DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
- sizeof(struct tss_struct));
+ offsetofend(struct tss_struct, SYSENTER_stack));
#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
BLANK();
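
offsetofend() yields the offset of the first byte past a member, which is exactly the initial stack top for a stack embedded in a struct (x86 stacks grow down). A user-space sketch of the macro and its use; the struct layout here is illustrative only:

#include <stddef.h>
#include <stdio.h>

/* Same shape as the kernel macro: offset of the first byte past MEMBER. */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct demo_tss {
	long hw_state[26];        /* stands in for struct x86_hw_tss */
	unsigned long stack[64];  /* stands in for SYSENTER_stack */
	int other;
};

int main(void)
{
	/* One past the stack array is a valid initial stack top. */
	printf("stack top offset: %zu\n", offsetofend(struct demo_tss, stack));
	return 0;
}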
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index fdcbb4d..5ce6f2d 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -81,6 +81,7 @@ int main(void)
#undef ENTRY
OFFSET(TSS_ist, tss_struct, x86_tss.ist);
+ OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
BLANK();
DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 80091ae..9bff687 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,8 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
endif
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o perf_event_intel_cqm.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_pt.o perf_event_intel_bts.o
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \
perf_event_intel_uncore_snb.o \
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a220239..fd470eb 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
#include <linux/io.h>
#include <linux/sched.h>
+#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
@@ -488,6 +489,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
va_align.mask = (upperbit - 1) & PAGE_MASK;
va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
+
+ /* A random value per boot for bit slice [12:upper_bit) */
+ va_align.bits = get_random_int() & va_align.mask;
}
}
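
The added line draws one random value per boot and masks it down to the page-aligned bit slice [12, upper_bit). A standalone sketch of that masking; upper_bit and the RNG are stand-ins for the real per-family computation and get_random_int():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_MASK (~0xfffUL)

int main(void)
{
	unsigned int upper_bit = 15;   /* hypothetical top of the slice */
	unsigned long mask = ((1UL << upper_bit) - 1) & PAGE_MASK;

	srand(1);                      /* stands in for get_random_int() */
	unsigned long bits = (unsigned long)rand() & mask;

	/* Only bits [12, upper_bit) can ever be set in 'bits'. */
	printf("mask=%#lx bits=%#lx\n", mask, bits);
	return 0;
}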
@@ -711,6 +715,11 @@ static void init_amd(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
+
+ /* 3DNow or LM implies PREFETCHW */
+ if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
+ if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
+ set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2346c95..a62cf04 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -646,6 +646,30 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
c->x86_capability[10] = eax;
}
+ /* Additional Intel-defined flags: level 0x0000000F */
+ if (c->cpuid_level >= 0x0000000F) {
+ u32 eax, ebx, ecx, edx;
+
+ /* QoS sub-leaf, EAX=0Fh, ECX=0 */
+ cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
+ c->x86_capability[11] = edx;
+ if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
+ /* will be overridden if occupancy monitoring exists */
+ c->x86_cache_max_rmid = ebx;
+
+ /* QoS sub-leaf, EAX=0Fh, ECX=1 */
+ cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
+ c->x86_capability[12] = edx;
+ if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
+ c->x86_cache_max_rmid = ecx;
+ c->x86_cache_occ_scale = ebx;
+ }
+ } else {
+ c->x86_cache_max_rmid = -1;
+ c->x86_cache_occ_scale = -1;
+ }
+ }
+
/* AMD-defined flags: level 0x80000001 */
xlvl = cpuid_eax(0x80000000);
c->extended_cpuid_level = xlvl;
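
The new leaf-0xF enumeration can be reproduced from user space: sub-leaf 0 reports the RMID range (EBX) and monitoring capabilities (EDX), sub-leaf 1 the occupancy scale (EBX) and effective max RMID (ECX). A sketch using GCC's <cpuid.h>; the EDX bit-1 test mirrors the X86_FEATURE_CQM_LLC check as I read the leaf layout:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_max(0, NULL) < 0xf)
		return 0;                       /* no QoS monitoring leaf */

	__cpuid_count(0xf, 0, eax, ebx, ecx, edx);
	printf("max RMID (range): %u, L3 monitoring: %s\n",
	       ebx, (edx & (1u << 1)) ? "yes" : "no");

	__cpuid_count(0xf, 1, eax, ebx, ecx, edx);
	printf("occupancy scale: %u bytes/unit, max RMID: %u\n", ebx, ecx);
	return 0;
}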
@@ -834,6 +858,20 @@ static void generic_identify(struct cpuinfo_x86 *c)
detect_nopl(c);
}
+static void x86_init_cache_qos(struct cpuinfo_x86 *c)
+{
+ /*
+ * The heavy lifting of max_rmid and cache_occ_scale is handled
+ * in get_cpu_cap(). Here we just set the max_rmid for the boot_cpu
+ * in case CQM bits really aren't there in this CPU.
+ */
+ if (c != &boot_cpu_data) {
+ boot_cpu_data.x86_cache_max_rmid =
+ min(boot_cpu_data.x86_cache_max_rmid,
+ c->x86_cache_max_rmid);
+ }
+}
+
/*
* This does the hard work of actually picking apart the CPU stuff...
*/
@@ -923,6 +961,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
init_hypervisor(c);
x86_init_rdrand(c);
+ x86_init_cache_qos(c);
/*
* Clear/Set all flags overridden by options; we need to do it
@@ -959,38 +998,37 @@ static void identify_cpu(struct cpuinfo_x86 *c)
#endif
}
-#ifdef CONFIG_X86_64
-#ifdef CONFIG_IA32_EMULATION
-/* May not be __init: called during resume */
-static void syscall32_cpu_init(void)
-{
- /* Load these always in case some future AMD CPU supports
- SYSENTER from compat mode too. */
- wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
- wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
- wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
-
- wrmsrl(MSR_CSTAR, ia32_cstar_target);
-}
-#endif /* CONFIG_IA32_EMULATION */
-#endif /* CONFIG_X86_64 */
-
+/*
+ * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
+ * on 32-bit kernels:
+ */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
- int cpu = get_cpu();
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ struct tss_struct *tss;
+ int cpu;
- if (!boot_cpu_has(X86_FEATURE_SEP)) {
- put_cpu();
- return;
- }
+ cpu = get_cpu();
+ tss = &per_cpu(cpu_tss, cpu);
+
+ if (!boot_cpu_has(X86_FEATURE_SEP))
+ goto out;
+
+ /*
+ * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
+ * see the big comment in struct x86_hw_tss's definition.
+ */
tss->x86_tss.ss1 = __KERNEL_CS;
- tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
- wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
- wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
- wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
+ wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
+
+ wrmsr(MSR_IA32_SYSENTER_ESP,
+ (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
+ 0);
+
+ wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0);
+
+out:
put_cpu();
}
#endif
@@ -1118,7 +1156,7 @@ static __init int setup_disablecpuid(char *arg)
__setup("clearcpuid=", setup_disablecpuid);
DEFINE_PER_CPU(unsigned long, kernel_stack) =
- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+ (unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);
#ifdef CONFIG_X86_64
@@ -1130,8 +1168,8 @@ DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE) __visible;
/*
- * The following four percpu variables are hot. Align current_task to
- * cacheline size such that all four fall in the same cacheline.
+ * The following percpu variables are hot. Align current_task to
+ * cacheline size such that they fall in the same cacheline.
*/
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
&init_task;
@@ -1171,10 +1209,23 @@ void syscall_init(void)
*/
wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
wrmsrl(MSR_LSTAR, system_call);
- wrmsrl(MSR_CSTAR, ignore_sysret);
#ifdef CONFIG_IA32_EMULATION
- syscall32_cpu_init();
+ wrmsrl(MSR_CSTAR, ia32_cstar_target);
+ /*
+ * This only works on Intel CPUs.
+ * On AMD CPUs these MSRs are 32-bit; the CPU truncates MSR_IA32_SYSENTER_EIP.
+ * This does not cause SYSENTER to jump to the wrong location, because
+ * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
+ */
+ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+ wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+ wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+#else
+ wrmsrl(MSR_CSTAR, ignore_sysret);
+ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
+ wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+ wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif
/* Flags to clear on syscall */
@@ -1226,6 +1277,15 @@ DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
+/*
+ * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
+ * the top of the kernel stack. Use an extra percpu variable to track the
+ * top of the kernel stack directly.
+ */
+DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
+ (unsigned long)&init_thread_union + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
+
#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
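
A sketch of how such a per-cpu stack-top variable is typically consumed (kernel context, not standalone; the accessor name here is illustrative, not necessarily a helper this series adds):

#include <linux/percpu.h>

DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);

/* Authoritative stack top, immune to vm86 rewriting tss.sp0. */
static inline unsigned long current_top_of_stack(void)
{
	return this_cpu_read(cpu_current_top_of_stack);
}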
@@ -1307,7 +1367,7 @@ void cpu_init(void)
*/
load_ucode_ap();
- t = &per_cpu(init_tss, cpu);
+ t = &per_cpu(cpu_tss, cpu);
oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
@@ -1391,7 +1451,7 @@ void cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
- struct tss_struct *t = &per_cpu(init_tss, cpu);
+ struct tss_struct *t = &per_cpu(cpu_tss, cpu);
struct thread_struct *thread = &curr->thread;
wait_for_master_cpu(cpu);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6596433..edcb0e2 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -7,16 +7,14 @@
* Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
*/
-#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/compiler.h>
+#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/sched.h>
+#include <linux/sysfs.h>
#include <linux/pci.h>
#include <asm/processor.h>
-#include <linux/smp.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>
@@ -116,10 +114,10 @@ static const struct _cache_table cache_table[] =
enum _cache_type {
- CACHE_TYPE_NULL = 0,
- CACHE_TYPE_DATA = 1,
- CACHE_TYPE_INST = 2,
- CACHE_TYPE_UNIFIED = 3
+ CTYPE_NULL = 0,
+ CTYPE_DATA = 1,
+ CTYPE_INST = 2,
+ CTYPE_UNIFIED = 3
};
union _cpuid4_leaf_eax {
@@ -159,11 +157,6 @@ struct _cpuid4_info_regs {
struct amd_northbridge *nb;
};
-struct _cpuid4_info {
- struct _cpuid4_info_regs base;
- DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
-};
-
unsigned short num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
@@ -220,6 +213,13 @@ static const unsigned short assocs[] = {
static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };
+static const enum cache_type cache_type_map[] = {
+ [CTYPE_NULL] = CACHE_TYPE_NOCACHE,
+ [CTYPE_DATA] = CACHE_TYPE_DATA,
+ [CTYPE_INST] = CACHE_TYPE_INST,
+ [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
+};
+
static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
union _cpuid4_leaf_ebx *ebx,
@@ -291,14 +291,8 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
(ebx->split.ways_of_associativity + 1) - 1;
}
-struct _cache_attr {
- struct attribute attr;
- ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
- ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
- unsigned int);
-};
-
#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
+
/*
* L3 cache descriptors
*/
@@ -325,20 +319,6 @@ static void amd_calc_l3_indices(struct amd_northbridge *nb)
l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
-static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
-{
- int node;
-
- /* only for L3, and not in virtualized environments */
- if (index < 3)
- return;
-
- node = amd_get_nb_id(smp_processor_id());
- this_leaf->nb = node_to_amd_nb(node);
- if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
- amd_calc_l3_indices(this_leaf->nb);
-}
-
/*
* check whether a slot used for disabling an L3 index is occupied.
* @l3: L3 cache descriptor
@@ -359,15 +339,13 @@ int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
return -1;
}
-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
unsigned int slot)
{
int index;
+ struct amd_northbridge *nb = this_leaf->priv;
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- return -EINVAL;
-
- index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
+ index = amd_get_l3_disable_slot(nb, slot);
if (index >= 0)
return sprintf(buf, "%d\n", index);
@@ -376,9 +354,10 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
#define SHOW_CACHE_DISABLE(slot) \
static ssize_t \
-show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
- unsigned int cpu) \
+cache_disable_##slot##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
return show_cache_disable(this_leaf, buf, slot); \
}
SHOW_CACHE_DISABLE(0)
@@ -446,25 +425,23 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
return 0;
}
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
- const char *buf, size_t count,
- unsigned int slot)
+static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
+ const char *buf, size_t count,
+ unsigned int slot)
{
unsigned long val = 0;
int cpu, err = 0;
+ struct amd_northbridge *nb = this_leaf->priv;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- return -EINVAL;
-
- cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+ cpu = cpumask_first(&this_leaf->shared_cpu_map);
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
- err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
+ err = amd_set_l3_disable_slot(nb, cpu, slot, val);
if (err) {
if (err == -EEXIST)
pr_warning("L3 slot %d in use/index already disabled!\n",
@@ -476,41 +453,36 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
#define STORE_CACHE_DISABLE(slot) \
static ssize_t \
-store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
- const char *buf, size_t count, \
- unsigned int cpu) \
+cache_disable_##slot##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
{ \
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
return store_cache_disable(this_leaf, buf, count, slot); \
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)
-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
- show_cache_disable_0, store_cache_disable_0);
-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
- show_cache_disable_1, store_cache_disable_1);
-
-static ssize_t
-show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
+static ssize_t subcaches_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- return -EINVAL;
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ int cpu = cpumask_first(&this_leaf->shared_cpu_map);
return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}
-static ssize_t
-store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
- unsigned int cpu)
+static ssize_t subcaches_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ int cpu = cpumask_first(&this_leaf->shared_cpu_map);
unsigned long val;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- return -EINVAL;
-
if (kstrtoul(buf, 16, &val) < 0)
return -EINVAL;
@@ -520,9 +492,92 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
return count;
}
-static struct _cache_attr subcaches =
- __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
+static DEVICE_ATTR_RW(cache_disable_0);
+static DEVICE_ATTR_RW(cache_disable_1);
+static DEVICE_ATTR_RW(subcaches);
+
+static umode_t
+cache_private_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+
+ if (!this_leaf->priv)
+ return 0;
+
+ if ((attr == &dev_attr_subcaches.attr) &&
+ amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ return mode;
+
+ if ((attr == &dev_attr_cache_disable_0.attr ||
+ attr == &dev_attr_cache_disable_1.attr) &&
+ amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+ return mode;
+
+ return 0;
+}
+
+static struct attribute_group cache_private_group = {
+ .is_visible = cache_private_attrs_is_visible,
+};
+
+static void init_amd_l3_attrs(void)
+{
+ int n = 1;
+ static struct attribute **amd_l3_attrs;
+
+ if (amd_l3_attrs) /* already initialized */
+ return;
+
+ if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+ n += 2;
+ if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ n += 1;
+
+ amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
+ if (!amd_l3_attrs)
+ return;
+
+ n = 0;
+ if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
+ amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
+ amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
+ }
+ if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ amd_l3_attrs[n++] = &dev_attr_subcaches.attr;
+ cache_private_group.attrs = amd_l3_attrs;
+}
+
+const struct attribute_group *
+cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+ struct amd_northbridge *nb = this_leaf->priv;
+
+ if (this_leaf->level < 3 || !nb)
+ return NULL;
+
+ if (nb && nb->l3_cache.indices)
+ init_amd_l3_attrs();
+
+ return &cache_private_group;
+}
+
+static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
+{
+ int node;
+
+ /* only for L3, and not in virtualized environments */
+ if (index < 3)
+ return;
+
+ node = amd_get_nb_id(smp_processor_id());
+ this_leaf->nb = node_to_amd_nb(node);
+ if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
+ amd_calc_l3_indices(this_leaf->nb);
+}
#else
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
@@ -546,7 +601,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
}
- if (eax.split.type == CACHE_TYPE_NULL)
+ if (eax.split.type == CTYPE_NULL)
return -EIO; /* better error ? */
this_leaf->eax = eax;
@@ -575,7 +630,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
/* Do cpuid(op) loop to find out num_cache_leaves */
cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
cache_eax.full = eax;
- } while (cache_eax.split.type != CACHE_TYPE_NULL);
+ } while (cache_eax.split.type != CTYPE_NULL);
return i;
}
@@ -626,9 +681,9 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
switch (this_leaf.eax.split.level) {
case 1:
- if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
+ if (this_leaf.eax.split.type == CTYPE_DATA)
new_l1d = this_leaf.size/1024;
- else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
+ else if (this_leaf.eax.split.type == CTYPE_INST)
new_l1i = this_leaf.size/1024;
break;
case 2:
@@ -747,55 +802,52 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
return l2;
}
-#ifdef CONFIG_SYSFS
-
-/* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
-#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
-
-#ifdef CONFIG_SMP
-
-static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
+static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
+ struct _cpuid4_info_regs *base)
{
- struct _cpuid4_info *this_leaf;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf;
int i, sibling;
if (cpu_has_topoext) {
unsigned int apicid, nshared, first, last;
- if (!per_cpu(ici_cpuid4_info, cpu))
- return 0;
-
- this_leaf = CPUID4_INFO_IDX(cpu, index);
- nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
+ this_leaf = this_cpu_ci->info_list + index;
+ nshared = base->eax.split.num_threads_sharing + 1;
apicid = cpu_data(cpu).apicid;
first = apicid - (apicid % nshared);
last = first + nshared - 1;
for_each_online_cpu(i) {
+ this_cpu_ci = get_cpu_cacheinfo(i);
+ if (!this_cpu_ci->info_list)
+ continue;
+
apicid = cpu_data(i).apicid;
if ((apicid < first) || (apicid > last))
continue;
- if (!per_cpu(ici_cpuid4_info, i))
- continue;
- this_leaf = CPUID4_INFO_IDX(i, index);
+
+ this_leaf = this_cpu_ci->info_list + index;
for_each_online_cpu(sibling) {
apicid = cpu_data(sibling).apicid;
if ((apicid < first) || (apicid > last))
continue;
- set_bit(sibling, this_leaf->shared_cpu_map);
+ cpumask_set_cpu(sibling,
+ &this_leaf->shared_cpu_map);
}
}
} else if (index == 3) {
for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
- if (!per_cpu(ici_cpuid4_info, i))
+ this_cpu_ci = get_cpu_cacheinfo(i);
+ if (!this_cpu_ci->info_list)
continue;
- this_leaf = CPUID4_INFO_IDX(i, index);
+ this_leaf = this_cpu_ci->info_list + index;
for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
if (!cpu_online(sibling))
continue;
- set_bit(sibling, this_leaf->shared_cpu_map);
+ cpumask_set_cpu(sibling,
+ &this_leaf->shared_cpu_map);
}
}
} else
@@ -804,457 +856,86 @@ static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
return 1;
}
-static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void __cache_cpumap_setup(unsigned int cpu, int index,
+ struct _cpuid4_info_regs *base)
{
- struct _cpuid4_info *this_leaf, *sibling_leaf;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf, *sibling_leaf;
unsigned long num_threads_sharing;
int index_msb, i;
struct cpuinfo_x86 *c = &cpu_data(cpu);
if (c->x86_vendor == X86_VENDOR_AMD) {
- if (cache_shared_amd_cpu_map_setup(cpu, index))
+ if (__cache_amd_cpumap_setup(cpu, index, base))
return;
}
- this_leaf = CPUID4_INFO_IDX(cpu, index);
- num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
+ this_leaf = this_cpu_ci->info_list + index;
+ num_threads_sharing = 1 + base->eax.split.num_threads_sharing;
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
if (num_threads_sharing == 1)
- cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
- else {
- index_msb = get_count_order(num_threads_sharing);
-
- for_each_online_cpu(i) {
- if (cpu_data(i).apicid >> index_msb ==
- c->apicid >> index_msb) {
- cpumask_set_cpu(i,
- to_cpumask(this_leaf->shared_cpu_map));
- if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
- sibling_leaf =
- CPUID4_INFO_IDX(i, index);
- cpumask_set_cpu(cpu, to_cpumask(
- sibling_leaf->shared_cpu_map));
- }
- }
- }
- }
-}
-static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
-{
- struct _cpuid4_info *this_leaf, *sibling_leaf;
- int sibling;
-
- this_leaf = CPUID4_INFO_IDX(cpu, index);
- for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
- sibling_leaf = CPUID4_INFO_IDX(sibling, index);
- cpumask_clear_cpu(cpu,
- to_cpumask(sibling_leaf->shared_cpu_map));
- }
-}
-#else
-static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
-{
-}
-
-static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
-{
-}
-#endif
-
-static void free_cache_attributes(unsigned int cpu)
-{
- int i;
-
- for (i = 0; i < num_cache_leaves; i++)
- cache_remove_shared_cpu_map(cpu, i);
-
- kfree(per_cpu(ici_cpuid4_info, cpu));
- per_cpu(ici_cpuid4_info, cpu) = NULL;
-}
-
-static void get_cpu_leaves(void *_retval)
-{
- int j, *retval = _retval, cpu = smp_processor_id();
+ return;
- /* Do cpuid and store the results */
- for (j = 0; j < num_cache_leaves; j++) {
- struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
+ index_msb = get_count_order(num_threads_sharing);
- *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
- if (unlikely(*retval < 0)) {
- int i;
+ for_each_online_cpu(i)
+ if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) {
+ struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
- for (i = 0; i < j; i++)
- cache_remove_shared_cpu_map(cpu, i);
- break;
+ if (i == cpu || !sib_cpu_ci->info_list)
+ continue; /* skip if itself or no cacheinfo */
+ sibling_leaf = sib_cpu_ci->info_list + index;
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
}
- cache_shared_cpu_map_setup(cpu, j);
- }
}
-static int detect_cache_attributes(unsigned int cpu)
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+ struct _cpuid4_info_regs *base)
{
- int retval;
-
- if (num_cache_leaves == 0)
- return -ENOENT;
-
- per_cpu(ici_cpuid4_info, cpu) = kzalloc(
- sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
- if (per_cpu(ici_cpuid4_info, cpu) == NULL)
- return -ENOMEM;
-
- smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
- if (retval) {
- kfree(per_cpu(ici_cpuid4_info, cpu));
- per_cpu(ici_cpuid4_info, cpu) = NULL;
- }
-
- return retval;
+ this_leaf->level = base->eax.split.level;
+ this_leaf->type = cache_type_map[base->eax.split.type];
+ this_leaf->coherency_line_size =
+ base->ebx.split.coherency_line_size + 1;
+ this_leaf->ways_of_associativity =
+ base->ebx.split.ways_of_associativity + 1;
+ this_leaf->size = base->size;
+ this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
+ this_leaf->physical_line_partition =
+ base->ebx.split.physical_line_partition + 1;
+ this_leaf->priv = base->nb;
}
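
ci_leaf_init() just copies the +1-biased CPUID.4 fields into the generic struct; the underlying identity is total size = ways x partitions x line size x sets. A user-space sketch that walks the same leaf:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, index;

	for (index = 0; ; index++) {
		__cpuid_count(4, index, eax, ebx, ecx, edx);

		unsigned int type = eax & 0x1f;
		if (type == 0)                    /* CTYPE_NULL: no more leaves */
			break;

		unsigned int level      = (eax >> 5) & 0x7;
		unsigned int line_size  = (ebx & 0xfff) + 1;
		unsigned int partitions = ((ebx >> 12) & 0x3ff) + 1;
		unsigned int ways       = ((ebx >> 22) & 0x3ff) + 1;
		unsigned int sets       = ecx + 1;

		printf("L%u type %u: %u KB\n", level, type,
		       ways * partitions * line_size * sets / 1024);
	}
	return 0;
}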
-#include <linux/kobject.h>
-#include <linux/sysfs.h>
-#include <linux/cpu.h>
-
-/* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
-
-struct _index_kobject {
- struct kobject kobj;
- unsigned int cpu;
- unsigned short index;
-};
-
-/* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
-#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
-
-#define show_one_plus(file_name, object, val) \
-static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
- unsigned int cpu) \
-{ \
- return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
-}
-
-show_one_plus(level, base.eax.split.level, 0);
-show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
-show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
-show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
-show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
-
-static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int cpu)
-{
- return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
-}
-
-static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
- int type, char *buf)
-{
- const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
- int ret;
-
- if (type)
- ret = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
- cpumask_pr_args(mask));
- else
- ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb",
- cpumask_pr_args(mask));
- buf[ret++] = '\n';
- buf[ret] = '\0';
- return ret;
-}
-
-static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
- unsigned int cpu)
+static int __init_cache_level(unsigned int cpu)
{
- return show_shared_cpu_map_func(leaf, 0, buf);
-}
-
-static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
- unsigned int cpu)
-{
- return show_shared_cpu_map_func(leaf, 1, buf);
-}
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int cpu)
-{
- switch (this_leaf->base.eax.split.type) {
- case CACHE_TYPE_DATA:
- return sprintf(buf, "Data\n");
- case CACHE_TYPE_INST:
- return sprintf(buf, "Instruction\n");
- case CACHE_TYPE_UNIFIED:
- return sprintf(buf, "Unified\n");
- default:
- return sprintf(buf, "Unknown\n");
- }
-}
-
-#define to_object(k) container_of(k, struct _index_kobject, kobj)
-#define to_attr(a) container_of(a, struct _cache_attr, attr)
-
-#define define_one_ro(_name) \
-static struct _cache_attr _name = \
- __ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(level);
-define_one_ro(type);
-define_one_ro(coherency_line_size);
-define_one_ro(physical_line_partition);
-define_one_ro(ways_of_associativity);
-define_one_ro(number_of_sets);
-define_one_ro(size);
-define_one_ro(shared_cpu_map);
-define_one_ro(shared_cpu_list);
-
-static struct attribute *default_attrs[] = {
- &type.attr,
- &level.attr,
- &coherency_line_size.attr,
- &physical_line_partition.attr,
- &ways_of_associativity.attr,
- &number_of_sets.attr,
- &size.attr,
- &shared_cpu_map.attr,
- &shared_cpu_list.attr,
- NULL
-};
-
-#ifdef CONFIG_AMD_NB
-static struct attribute **amd_l3_attrs(void)
-{
- static struct attribute **attrs;
- int n;
-
- if (attrs)
- return attrs;
-
- n = ARRAY_SIZE(default_attrs);
-
- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- n += 2;
-
- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- n += 1;
-
- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
- if (attrs == NULL)
- return attrs = default_attrs;
-
- for (n = 0; default_attrs[n]; n++)
- attrs[n] = default_attrs[n];
-
- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
- attrs[n++] = &cache_disable_0.attr;
- attrs[n++] = &cache_disable_1.attr;
- }
-
- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- attrs[n++] = &subcaches.attr;
-
- return attrs;
-}
-#endif
-
-static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
- struct _cache_attr *fattr = to_attr(attr);
- struct _index_kobject *this_leaf = to_object(kobj);
- ssize_t ret;
-
- ret = fattr->show ?
- fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
- buf, this_leaf->cpu) :
- 0;
- return ret;
-}
-
-static ssize_t store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct _cache_attr *fattr = to_attr(attr);
- struct _index_kobject *this_leaf = to_object(kobj);
- ssize_t ret;
-
- ret = fattr->store ?
- fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
- buf, count, this_leaf->cpu) :
- 0;
- return ret;
-}
-
-static const struct sysfs_ops sysfs_ops = {
- .show = show,
- .store = store,
-};
-
-static struct kobj_type ktype_cache = {
- .sysfs_ops = &sysfs_ops,
- .default_attrs = default_attrs,
-};
-
-static struct kobj_type ktype_percpu_entry = {
- .sysfs_ops = &sysfs_ops,
-};
-
-static void cpuid4_cache_sysfs_exit(unsigned int cpu)
-{
- kfree(per_cpu(ici_cache_kobject, cpu));
- kfree(per_cpu(ici_index_kobject, cpu));
- per_cpu(ici_cache_kobject, cpu) = NULL;
- per_cpu(ici_index_kobject, cpu) = NULL;
- free_cache_attributes(cpu);
-}
-
-static int cpuid4_cache_sysfs_init(unsigned int cpu)
-{
- int err;
-
- if (num_cache_leaves == 0)
+ if (!num_cache_leaves)
return -ENOENT;
-
- err = detect_cache_attributes(cpu);
- if (err)
- return err;
-
- /* Allocate all required memory */
- per_cpu(ici_cache_kobject, cpu) =
- kzalloc(sizeof(struct kobject), GFP_KERNEL);
- if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
- goto err_out;
-
- per_cpu(ici_index_kobject, cpu) = kzalloc(
- sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
- if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
- goto err_out;
-
+ if (!this_cpu_ci)
+ return -EINVAL;
+ this_cpu_ci->num_levels = 3;
+ this_cpu_ci->num_leaves = num_cache_leaves;
return 0;
-
-err_out:
- cpuid4_cache_sysfs_exit(cpu);
- return -ENOMEM;
}
-static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
-
-/* Add/Remove cache interface for CPU device */
-static int cache_add_dev(struct device *dev)
+static int __populate_cache_leaves(unsigned int cpu)
{
- unsigned int cpu = dev->id;
- unsigned long i, j;
- struct _index_kobject *this_object;
- struct _cpuid4_info *this_leaf;
- int retval;
-
- retval = cpuid4_cache_sysfs_init(cpu);
- if (unlikely(retval < 0))
- return retval;
-
- retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
- &ktype_percpu_entry,
- &dev->kobj, "%s", "cache");
- if (retval < 0) {
- cpuid4_cache_sysfs_exit(cpu);
- return retval;
- }
+ unsigned int idx, ret;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ struct _cpuid4_info_regs id4_regs = {};
- for (i = 0; i < num_cache_leaves; i++) {
- this_object = INDEX_KOBJECT_PTR(cpu, i);
- this_object->cpu = cpu;
- this_object->index = i;
-
- this_leaf = CPUID4_INFO_IDX(cpu, i);
-
- ktype_cache.default_attrs = default_attrs;
-#ifdef CONFIG_AMD_NB
- if (this_leaf->base.nb)
- ktype_cache.default_attrs = amd_l3_attrs();
-#endif
- retval = kobject_init_and_add(&(this_object->kobj),
- &ktype_cache,
- per_cpu(ici_cache_kobject, cpu),
- "index%1lu", i);
- if (unlikely(retval)) {
- for (j = 0; j < i; j++)
- kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
- kobject_put(per_cpu(ici_cache_kobject, cpu));
- cpuid4_cache_sysfs_exit(cpu);
- return retval;
- }
- kobject_uevent(&(this_object->kobj), KOBJ_ADD);
+ for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
+ ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
+ if (ret)
+ return ret;
+ ci_leaf_init(this_leaf++, &id4_regs);
+ __cache_cpumap_setup(cpu, idx, &id4_regs);
}
- cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
-
- kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
return 0;
}
-static void cache_remove_dev(struct device *dev)
-{
- unsigned int cpu = dev->id;
- unsigned long i;
-
- if (per_cpu(ici_cpuid4_info, cpu) == NULL)
- return;
- if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
- return;
- cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
-
- for (i = 0; i < num_cache_leaves; i++)
- kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
- kobject_put(per_cpu(ici_cache_kobject, cpu));
- cpuid4_cache_sysfs_exit(cpu);
-}
-
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- struct device *dev;
-
- dev = get_cpu_device(cpu);
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cache_add_dev(dev);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- cache_remove_dev(dev);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block cacheinfo_cpu_notifier = {
- .notifier_call = cacheinfo_cpu_callback,
-};
-
-static int __init cache_sysfs_init(void)
-{
- int i, err = 0;
-
- if (num_cache_leaves == 0)
- return 0;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(i) {
- struct device *dev = get_cpu_device(i);
-
- err = cache_add_dev(dev);
- if (err)
- goto out;
- }
- __register_hotcpu_notifier(&cacheinfo_cpu_notifier);
-
-out:
- cpu_notifier_register_done();
- return err;
-}
-
-device_initcall(cache_sysfs_init);
-
-#endif
+DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
+DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
diff --git a/arch/x86/kernel/cpu/intel_pt.h b/arch/x86/kernel/cpu/intel_pt.h
new file mode 100644
index 0000000..1c338b0
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_pt.h
@@ -0,0 +1,131 @@
+/*
+ * Intel(R) Processor Trace PMU driver for perf
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Intel PT is specified in the Intel Architecture Instruction Set Extensions
+ * Programming Reference:
+ * http://software.intel.com/en-us/intel-isa-extensions
+ */
+
+#ifndef __INTEL_PT_H__
+#define __INTEL_PT_H__
+
+/*
+ * Single-entry ToPA: when this close to region boundary, switch
+ * buffers to avoid losing data.
+ */
+#define TOPA_PMI_MARGIN 512
+
+/*
+ * Table of Physical Addresses bits
+ */
+enum topa_sz {
+ TOPA_4K = 0,
+ TOPA_8K,
+ TOPA_16K,
+ TOPA_32K,
+ TOPA_64K,
+ TOPA_128K,
+ TOPA_256K,
+ TOPA_512K,
+ TOPA_1MB,
+ TOPA_2MB,
+ TOPA_4MB,
+ TOPA_8MB,
+ TOPA_16MB,
+ TOPA_32MB,
+ TOPA_64MB,
+ TOPA_128MB,
+ TOPA_SZ_END,
+};
+
+static inline unsigned int sizes(enum topa_sz tsz)
+{
+ return 1 << (tsz + 12);
+}
+
+struct topa_entry {
+ u64 end : 1;
+ u64 rsvd0 : 1;
+ u64 intr : 1;
+ u64 rsvd1 : 1;
+ u64 stop : 1;
+ u64 rsvd2 : 1;
+ u64 size : 4;
+ u64 rsvd3 : 2;
+ u64 base : 36;
+ u64 rsvd4 : 16;
+};
+
+#define TOPA_SHIFT 12
+#define PT_CPUID_LEAVES 2
+
+enum pt_capabilities {
+ PT_CAP_max_subleaf = 0,
+ PT_CAP_cr3_filtering,
+ PT_CAP_topa_output,
+ PT_CAP_topa_multiple_entries,
+ PT_CAP_payloads_lip,
+};
+
+struct pt_pmu {
+ struct pmu pmu;
+ u32 caps[4 * PT_CPUID_LEAVES];
+};
+
+/**
+ * struct pt_buffer - buffer configuration; one buffer per task_struct or
+ * cpu, depending on perf event configuration
+ * @cpu: cpu for per-cpu allocation
+ * @tables: list of ToPA tables in this buffer
+ * @first: shorthand for first topa table
+ * @last: shorthand for last topa table
+ * @cur: current topa table
+ * @nr_pages: buffer size in pages
+ * @cur_idx: current output region's index within @cur table
+ * @output_off: offset within the current output region
+ * @data_size: running total of the amount of data in this buffer
+ * @lost: if data was lost/truncated
+ * @head: logical write offset inside the buffer
+ * @snapshot: if this is for a snapshot/overwrite counter
+ * @stop_pos: STOP topa entry in the buffer
+ * @intr_pos: INT topa entry in the buffer
+ * @data_pages: array of pages from perf
+ * @topa_index: table of topa entries indexed by page offset
+ */
+struct pt_buffer {
+ int cpu;
+ struct list_head tables;
+ struct topa *first, *last, *cur;
+ unsigned int cur_idx;
+ size_t output_off;
+ unsigned long nr_pages;
+ local_t data_size;
+ local_t lost;
+ local64_t head;
+ bool snapshot;
+ unsigned long stop_pos, intr_pos;
+ void **data_pages;
+ struct topa_entry *topa_index[0];
+};
+
+/**
+ * struct pt - per-cpu pt context
+ * @handle: perf output handle
+ * @handle_nmi: handle the PT PMI on this cpu (there's an active event)
+ */
+struct pt {
+ struct perf_output_handle handle;
+ int handle_nmi;
+};
+
+#endif /* __INTEL_PT_H__ */
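
Each ToPA entry's 4-bit size field encodes a power-of-two output region starting at 4 KB, which is all sizes() computes. A quick standalone check (only a few of the enum values reproduced):

#include <stdio.h>

enum topa_sz { TOPA_4K = 0, TOPA_2MB = 9, TOPA_128MB = 15 };

static unsigned int sizes(enum topa_sz tsz)
{
	return 1u << (tsz + 12);   /* 4 KB << tsz */
}

int main(void)
{
	printf("%u %u %u\n", sizes(TOPA_4K), sizes(TOPA_2MB), sizes(TOPA_128MB));
	return 0;
}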
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 10b4690..fe32074 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -14,6 +14,7 @@ enum severity_level {
};
#define ATTR_LEN 16
+#define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */
/* One object for each MCE bank, shared by all CPUs */
struct mce_bank {
@@ -23,20 +24,20 @@ struct mce_bank {
char attrname[ATTR_LEN]; /* attribute name */
};
-int mce_severity(struct mce *a, int tolerant, char **msg, bool is_excp);
+extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);
extern struct mce_bank *mce_banks;
extern mce_banks_t mce_banks_ce_disabled;
#ifdef CONFIG_X86_MCE_INTEL
-unsigned long mce_intel_adjust_timer(unsigned long interval);
-void mce_intel_cmci_poll(void);
+unsigned long cmci_intel_adjust_timer(unsigned long interval);
+bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
#else
-# define mce_intel_adjust_timer mce_adjust_timer_default
-static inline void mce_intel_cmci_poll(void) { }
+# define cmci_intel_adjust_timer mce_adjust_timer_default
+static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
#endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 8bb4330..9c682c2 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -186,7 +186,61 @@ static int error_context(struct mce *m)
return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
}
-int mce_severity(struct mce *m, int tolerant, char **msg, bool is_excp)
+/*
+ * See AMD Error Scope Hierarchy table in a newer BKDG. For example
+ * 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features"
+ */
+static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_excp)
+{
+ enum context ctx = error_context(m);
+
+ /* Processor Context Corrupt, no need to fumble too much, die! */
+ if (m->status & MCI_STATUS_PCC)
+ return MCE_PANIC_SEVERITY;
+
+ if (m->status & MCI_STATUS_UC) {
+
+ /*
+ * On older systems, where the overflow_recov flag is not present, we
+ * should simply panic if an error overflow occurs. If
+ * overflow_recov flag is present and set, then software can try
+ * to at least kill process to prolong system operation.
+ */
+ if (mce_flags.overflow_recov) {
+ /* software can try to contain */
+ if (!(m->mcgstatus & MCG_STATUS_RIPV) && (ctx == IN_KERNEL))
+ return MCE_PANIC_SEVERITY;
+
+ /* kill current process */
+ return MCE_AR_SEVERITY;
+ } else {
+ /* at least one error was not logged */
+ if (m->status & MCI_STATUS_OVER)
+ return MCE_PANIC_SEVERITY;
+ }
+
+ /*
+ * For any other case, return MCE_UC_SEVERITY so that we log the
+ * error and exit #MC handler.
+ */
+ return MCE_UC_SEVERITY;
+ }
+
+ /*
+ * deferred error: poll handler catches these and adds to mce_ring so
+ * memory-failure can take recovery actions.
+ */
+ if (m->status & MCI_STATUS_DEFERRED)
+ return MCE_DEFERRED_SEVERITY;
+
+ /*
+ * corrected error: poll handler catches these and passes responsibility
+ * of decoding the error to EDAC
+ */
+ return MCE_KEEP_SEVERITY;
+}
+
+static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_excp)
{
enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP);
enum context ctx = error_context(m);
@@ -216,6 +270,16 @@ int mce_severity(struct mce *m, int tolerant, char **msg, bool is_excp)
}
}
+/* Default to mce_severity_intel */
+int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) =
+ mce_severity_intel;
+
+void __init mcheck_vendor_init_severity(void)
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ mce_severity = mce_severity_amd;
+}
+
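
The severity grader is now dispatched through a function pointer that defaults to the Intel implementation and is overridden exactly once at boot. The pattern in isolation (types and graders below are made up):

#include <stdio.h>

struct report { int status; };

static int grade_intel(struct report *r) { return r->status * 2; }
static int grade_amd(struct report *r)   { return r->status + 1; }

/* Default to the Intel grader; vendor init may override it once at boot. */
static int (*grade)(struct report *) = grade_intel;

static void vendor_init(int is_amd)
{
	if (is_amd)
		grade = grade_amd;
}

int main(void)
{
	struct report r = { 3 };

	vendor_init(1);
	printf("severity=%d\n", grade(&r));
	return 0;
}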
#ifdef CONFIG_DEBUG_FS
static void *s_start(struct seq_file *f, loff_t *pos)
{
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 3c036cb..e535533 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -60,11 +60,12 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>
-#define SPINUNIT 100 /* 100ns */
+#define SPINUNIT 100 /* 100ns */
DEFINE_PER_CPU(unsigned, mce_exception_count);
struct mce_bank *mce_banks __read_mostly;
+struct mce_vendor_flags mce_flags __read_mostly;
struct mca_config mca_cfg __read_mostly = {
.bootlog = -1,
@@ -89,9 +90,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;
-/* CMCI storm detection filter */
-static DEFINE_PER_CPU(unsigned long, mce_polled_error);
-
/*
* MCA banks polled by the period polling timer for corrected events.
* With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
@@ -622,8 +620,9 @@ DEFINE_PER_CPU(unsigned, mce_poll_count);
* is already totally confused. In this case it's likely it will
* not fully execute the machine check handler either.
*/
-void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
+bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
+ bool error_logged = false;
struct mce m;
int severity;
int i;
@@ -646,7 +645,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
if (!(m.status & MCI_STATUS_VAL))
continue;
- this_cpu_write(mce_polled_error, 1);
+
/*
* Uncorrected or signalled events are handled by the exception
* handler when it is enabled, so don't process those here.
@@ -679,8 +678,10 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
* Don't get the IP here because it's unlikely to
* have anything to do with the actual error location.
*/
- if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
+ if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) {
+ error_logged = true;
mce_log(&m);
+ }
/*
* Clear state for this bank.
@@ -694,6 +695,8 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
*/
sync_core();
+
+ return error_logged;
}
EXPORT_SYMBOL_GPL(machine_check_poll);
@@ -813,7 +816,7 @@ static void mce_reign(void)
* other CPUs.
*/
if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
- mce_panic("Fatal Machine check", m, msg);
+ mce_panic("Fatal machine check", m, msg);
/*
* For UC somewhere we let the CPU who detects it handle it.
@@ -826,7 +829,7 @@ static void mce_reign(void)
* source or one CPU is hung. Panic.
*/
if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
- mce_panic("Machine check from unknown source", NULL, NULL);
+ mce_panic("Fatal machine check from unknown source", NULL, NULL);
/*
* Now clear all the mces_seen so that they don't reappear on
@@ -1258,7 +1261,7 @@ void mce_log_therm_throt_event(__u64 status)
* poller finds an MCE, poll 2x faster. When the poller finds no more
* errors, poll 2x slower (up to check_interval seconds).
*/
-static unsigned long check_interval = 5 * 60; /* 5 minutes */
+static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);
@@ -1268,49 +1271,57 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
return interval;
}
-static unsigned long (*mce_adjust_timer)(unsigned long interval) =
- mce_adjust_timer_default;
+static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
-static int cmc_error_seen(void)
+static void __restart_timer(struct timer_list *t, unsigned long interval)
{
- unsigned long *v = this_cpu_ptr(&mce_polled_error);
+ unsigned long when = jiffies + interval;
+ unsigned long flags;
+
+ local_irq_save(flags);
- return test_and_clear_bit(0, v);
+ if (timer_pending(t)) {
+ if (time_before(when, t->expires))
+ mod_timer_pinned(t, when);
+ } else {
+ t->expires = round_jiffies(when);
+ add_timer_on(t, smp_processor_id());
+ }
+
+ local_irq_restore(flags);
}
static void mce_timer_fn(unsigned long data)
{
struct timer_list *t = this_cpu_ptr(&mce_timer);
+ int cpu = smp_processor_id();
unsigned long iv;
- int notify;
- WARN_ON(smp_processor_id() != data);
+ WARN_ON(cpu != data);
+
+ iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
- machine_check_poll(MCP_TIMESTAMP,
- this_cpu_ptr(&mce_poll_banks));
- mce_intel_cmci_poll();
+ machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));
+
+ if (mce_intel_cmci_poll()) {
+ iv = mce_adjust_timer(iv);
+ goto done;
+ }
}
/*
- * Alert userspace if needed. If we logged an MCE, reduce the
- * polling interval, otherwise increase the polling interval.
+ * Alert userspace if needed. If we logged an MCE, reduce the polling
+ * interval, otherwise increase the polling interval.
*/
- iv = __this_cpu_read(mce_next_interval);
- notify = mce_notify_irq();
- notify |= cmc_error_seen();
- if (notify) {
+ if (mce_notify_irq())
iv = max(iv / 2, (unsigned long) HZ/100);
- } else {
+ else
iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
- iv = mce_adjust_timer(iv);
- }
+
+done:
__this_cpu_write(mce_next_interval, iv);
- /* Might have become 0 after CMCI storm subsided */
- if (iv) {
- t->expires = jiffies + iv;
- add_timer_on(t, smp_processor_id());
- }
+ __restart_timer(t, iv);
}
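
Besides factoring out __restart_timer(), the handler keeps the existing exponential backoff: halve the interval (floored at HZ/100) after logging an event, double it (capped at check_interval) otherwise. That adaptation in isolation:

#include <stdio.h>

#define HZ 250
#define CHECK_INTERVAL (5 * 60 * HZ)   /* 5 minutes, in ticks */

static unsigned long adapt(unsigned long iv, int logged)
{
	if (logged)
		return iv / 2 > HZ / 100 ? iv / 2 : HZ / 100;      /* poll faster */
	return iv * 2 < CHECK_INTERVAL ? iv * 2 : CHECK_INTERVAL;  /* back off */
}

int main(void)
{
	unsigned long iv = HZ;

	for (int i = 0; i < 5; i++) {
		iv = adapt(iv, i < 2);   /* two noisy polls, then quiet */
		printf("interval=%lu ticks\n", iv);
	}
	return 0;
}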
/*
@@ -1319,16 +1330,10 @@ static void mce_timer_fn(unsigned long data)
void mce_timer_kick(unsigned long interval)
{
struct timer_list *t = this_cpu_ptr(&mce_timer);
- unsigned long when = jiffies + interval;
unsigned long iv = __this_cpu_read(mce_next_interval);
- if (timer_pending(t)) {
- if (time_before(when, t->expires))
- mod_timer_pinned(t, when);
- } else {
- t->expires = round_jiffies(when);
- add_timer_on(t, smp_processor_id());
- }
+ __restart_timer(t, interval);
+
if (interval < iv)
__this_cpu_write(mce_next_interval, interval);
}
@@ -1525,45 +1530,46 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
* Various K7s with broken bank 0 around. Always disable
* by default.
*/
- if (c->x86 == 6 && cfg->banks > 0)
+ if (c->x86 == 6 && cfg->banks > 0)
mce_banks[0].ctl = 0;
- /*
- * Turn off MC4_MISC thresholding banks on those models since
- * they're not supported there.
- */
- if (c->x86 == 0x15 &&
- (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
- int i;
- u64 val, hwcr;
- bool need_toggle;
- u32 msrs[] = {
+ /*
+ * overflow_recov is supported for F15h Models 00h-0fh
+ * even though we don't have a CPUID bit for it.
+ */
+ if (c->x86 == 0x15 && c->x86_model <= 0xf)
+ mce_flags.overflow_recov = 1;
+
+ /*
+ * Turn off MC4_MISC thresholding banks on those models since
+ * they're not supported there.
+ */
+ if (c->x86 == 0x15 &&
+ (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
+ int i;
+ u64 hwcr;
+ bool need_toggle;
+ u32 msrs[] = {
0x00000413, /* MC4_MISC0 */
0xc0000408, /* MC4_MISC1 */
- };
+ };
- rdmsrl(MSR_K7_HWCR, hwcr);
+ rdmsrl(MSR_K7_HWCR, hwcr);
- /* McStatusWrEn has to be set */
- need_toggle = !(hwcr & BIT(18));
+ /* McStatusWrEn has to be set */
+ need_toggle = !(hwcr & BIT(18));
- if (need_toggle)
- wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
+ if (need_toggle)
+ wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
- for (i = 0; i < ARRAY_SIZE(msrs); i++) {
- rdmsrl(msrs[i], val);
+ /* Clear CntP bit safely */
+ for (i = 0; i < ARRAY_SIZE(msrs); i++)
+ msr_clear_bit(msrs[i], 62);
- /* CntP bit set? */
- if (val & BIT_64(62)) {
- val &= ~BIT_64(62);
- wrmsrl(msrs[i], val);
- }
- }
-
- /* restore old settings */
- if (need_toggle)
- wrmsrl(MSR_K7_HWCR, hwcr);
- }
+ /* restore old settings */
+ if (need_toggle)
+ wrmsrl(MSR_K7_HWCR, hwcr);
+ }
}
if (c->x86_vendor == X86_VENDOR_INTEL) {
@@ -1629,10 +1635,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
mce_intel_feature_init(c);
- mce_adjust_timer = mce_intel_adjust_timer;
+ mce_adjust_timer = cmci_intel_adjust_timer;
break;
case X86_VENDOR_AMD:
mce_amd_feature_init(c);
+ mce_flags.overflow_recov = cpuid_ebx(0x80000007) & 0x1;
break;
default:
break;
@@ -2017,6 +2024,7 @@ __setup("mce", mcheck_enable);
int __init mcheck_init(void)
{
mcheck_intel_therm_init();
+ mcheck_vendor_init_severity();
return 0;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index f1c3769..55ad9b3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -79,7 +79,7 @@ static inline bool is_shared_bank(int bank)
return (bank == 4);
}
-static const char * const bank4_names(struct threshold_block *b)
+static const char *bank4_names(const struct threshold_block *b)
{
switch (b->address) {
/* MSR4_MISC0 */
@@ -250,6 +250,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
if (!b.interrupt_capable)
goto init;
+ b.interrupt_enable = 1;
new = (high & MASK_LVTOFF_HI) >> 20;
offset = setup_APIC_mce(offset, new);
@@ -322,6 +323,8 @@ static void amd_threshold_interrupt(void)
log:
mce_setup(&m);
rdmsrl(MSR_IA32_MCx_STATUS(bank), m.status);
+ if (!(m.status & MCI_STATUS_VAL))
+ return;
m.misc = ((u64)high << 32) | low;
m.bank = bank;
mce_log(&m);
@@ -497,10 +500,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
b->interrupt_capable = lvt_interrupt_supported(bank, high);
b->threshold_limit = THRESHOLD_MAX;
- if (b->interrupt_capable)
+ if (b->interrupt_capable) {
threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
- else
+ b->interrupt_enable = 1;
+ } else {
threshold_ktype.default_attrs[2] = NULL;
+ }
INIT_LIST_HEAD(&b->miscj);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index b3c97ba..b4a41cf 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -39,6 +39,15 @@
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
/*
+ * CMCI storm detection backoff counter
+ *
+ * During storm, we reset this counter to INITIAL_CHECK_INTERVAL in case we've
+ * encountered an error. If not, we decrement it by one. We signal the end of
+ * the CMCI storm when it reaches 0.
+ */
+static DEFINE_PER_CPU(int, cmci_backoff_cnt);
+
+/*
* cmci_discover_lock protects against parallel discovery attempts
* which could race against each other.
*/
@@ -46,7 +55,7 @@ static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
#define CMCI_THRESHOLD 1
#define CMCI_POLL_INTERVAL (30 * HZ)
-#define CMCI_STORM_INTERVAL (1 * HZ)
+#define CMCI_STORM_INTERVAL (HZ)
#define CMCI_STORM_THRESHOLD 15
static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
@@ -82,11 +91,21 @@ static int cmci_supported(int *banks)
return !!(cap & MCG_CMCI_P);
}
-void mce_intel_cmci_poll(void)
+bool mce_intel_cmci_poll(void)
{
if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
- return;
- machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
+ return false;
+
+ /*
+ * Reset the counter if we've logged an error in the last poll
+ * during the storm.
+ */
+ if (machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)))
+ this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
+ else
+ this_cpu_dec(cmci_backoff_cnt);
+
+ return true;
}
void mce_intel_hcpu_update(unsigned long cpu)
@@ -97,31 +116,32 @@ void mce_intel_hcpu_update(unsigned long cpu)
per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}
-unsigned long mce_intel_adjust_timer(unsigned long interval)
+unsigned long cmci_intel_adjust_timer(unsigned long interval)
{
- int r;
-
- if (interval < CMCI_POLL_INTERVAL)
- return interval;
+ if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
+ (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) {
+ mce_notify_irq();
+ return CMCI_STORM_INTERVAL;
+ }
switch (__this_cpu_read(cmci_storm_state)) {
case CMCI_STORM_ACTIVE:
+
/*
* We switch back to interrupt mode once the poll timer has
- * silenced itself. That means no events recorded and the
- * timer interval is back to our poll interval.
+ * silenced itself. That means no events recorded and the timer
+ * interval is back to our poll interval.
*/
__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
- r = atomic_sub_return(1, &cmci_storm_on_cpus);
- if (r == 0)
+ if (!atomic_sub_return(1, &cmci_storm_on_cpus))
pr_notice("CMCI storm subsided: switching to interrupt mode\n");
+
/* FALLTHROUGH */
case CMCI_STORM_SUBSIDED:
/*
- * We wait for all cpus to go back to SUBSIDED
- * state. When that happens we switch back to
- * interrupt mode.
+ * We wait for all CPUs to go back to SUBSIDED state. When that
+ * happens we switch back to interrupt mode.
*/
if (!atomic_read(&cmci_storm_on_cpus)) {
__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
@@ -130,10 +150,8 @@ unsigned long mce_intel_adjust_timer(unsigned long interval)
}
return CMCI_POLL_INTERVAL;
default:
- /*
- * We have shiny weather. Let the poll do whatever it
- * thinks.
- */
+
+ /* We have shiny weather. Let the poll do whatever it thinks. */
return interval;
}
}
@@ -178,7 +196,8 @@ static bool cmci_storm_detect(void)
cmci_storm_disable_banks();
__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
r = atomic_add_return(1, &cmci_storm_on_cpus);
- mce_timer_kick(CMCI_POLL_INTERVAL);
+ mce_timer_kick(CMCI_STORM_INTERVAL);
+ this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
if (r == 1)
pr_notice("CMCI storm detected: switching to poll mode\n");
@@ -195,6 +214,7 @@ static void intel_threshold_interrupt(void)
{
if (cmci_storm_detect())
return;
+
machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
mce_notify_irq();
}
@@ -286,6 +306,7 @@ void cmci_recheck(void)
if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
return;
+
local_irq_save(flags);
machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
local_irq_restore(flags);
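
Taken together, the mce_intel.c hunks implement a simple backoff: while a storm is active the timer fires at CMCI_STORM_INTERVAL; each poll that logs an error reloads a per-CPU counter, each quiet poll decrements it, and only at zero does cmci_intel_adjust_timer() fall through to the storm-subsiding path. A compressed single-CPU model follows; intervals are in seconds, and INITIAL_CHECK_INTERVAL's value is an assumption since the diff only references the symbol:

#include <stdbool.h>

#define CMCI_STORM_INTERVAL	1	/* fast poll during a storm */
#define CMCI_POLL_INTERVAL	30	/* relaxed interval otherwise */
#define INITIAL_CHECK_INTERVAL	3	/* assumed reload value */

static int backoff_cnt;
static bool storm_active;

/* One timer expiry: returns the next poll interval, mirroring
 * mce_intel_cmci_poll() and cmci_intel_adjust_timer() combined. */
static int poll_once(bool logged_error)
{
	if (!storm_active)
		return CMCI_POLL_INTERVAL;

	if (logged_error)
		backoff_cnt = INITIAL_CHECK_INTERVAL;	/* storm still raging */
	else
		backoff_cnt--;

	if (backoff_cnt > 0)
		return CMCI_STORM_INTERVAL;		/* keep polling fast */

	storm_active = false;				/* storm subsided */
	return CMCI_POLL_INTERVAL;
}
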
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index bfbbe61..12829c3 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -21,7 +21,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/firmware.h>
-#include <linux/pci_ids.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>
diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c
index d45df4bd..a413a69 100644
--- a/arch/x86/kernel/cpu/microcode/core_early.c
+++ b/arch/x86/kernel/cpu/microcode/core_early.c
@@ -23,57 +23,6 @@
#include <asm/processor.h>
#include <asm/cmdline.h>
-#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
-#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
-#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
-#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
-#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
-#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
-#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')
-
-#define CPUID_IS(a, b, c, ebx, ecx, edx) \
- (!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))
-
-/*
- * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
- * x86_vendor() gets vendor id for BSP.
- *
- * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify
- * coding, we still use x86_vendor() to get vendor id for AP.
- *
- * x86_vendor() gets vendor information directly through cpuid.
- */
-static int x86_vendor(void)
-{
- u32 eax = 0x00000000;
- u32 ebx, ecx = 0, edx;
-
- native_cpuid(&eax, &ebx, &ecx, &edx);
-
- if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
- return X86_VENDOR_INTEL;
-
- if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
- return X86_VENDOR_AMD;
-
- return X86_VENDOR_UNKNOWN;
-}
-
-static int x86_family(void)
-{
- u32 eax = 0x00000001;
- u32 ebx, ecx = 0, edx;
- int x86;
-
- native_cpuid(&eax, &ebx, &ecx, &edx);
-
- x86 = (eax >> 8) & 0xf;
- if (x86 == 15)
- x86 += (eax >> 20) & 0xff;
-
- return x86;
-}
-
static bool __init check_loader_disabled_bsp(void)
{
#ifdef CONFIG_X86_32
@@ -96,7 +45,7 @@ static bool __init check_loader_disabled_bsp(void)
void __init load_ucode_bsp(void)
{
- int vendor, x86;
+ int vendor, family;
if (check_loader_disabled_bsp())
return;
@@ -105,15 +54,15 @@ void __init load_ucode_bsp(void)
return;
vendor = x86_vendor();
- x86 = x86_family();
+ family = x86_family();
switch (vendor) {
case X86_VENDOR_INTEL:
- if (x86 >= 6)
+ if (family >= 6)
load_ucode_intel_bsp();
break;
case X86_VENDOR_AMD:
- if (x86 >= 0x10)
+ if (family >= 0x10)
load_ucode_amd_bsp();
break;
default:
@@ -132,7 +81,7 @@ static bool check_loader_disabled_ap(void)
void load_ucode_ap(void)
{
- int vendor, x86;
+ int vendor, family;
if (check_loader_disabled_ap())
return;
@@ -141,15 +90,15 @@ void load_ucode_ap(void)
return;
vendor = x86_vendor();
- x86 = x86_family();
+ family = x86_family();
switch (vendor) {
case X86_VENDOR_INTEL:
- if (x86 >= 6)
+ if (family >= 6)
load_ucode_intel_ap();
break;
case X86_VENDOR_AMD:
- if (x86 >= 0x10)
+ if (family >= 0x10)
load_ucode_amd_ap();
break;
default:
@@ -179,18 +128,18 @@ int __init save_microcode_in_initrd(void)
void reload_early_microcode(void)
{
- int vendor, x86;
+ int vendor, family;
vendor = x86_vendor();
- x86 = x86_family();
+ family = x86_family();
switch (vendor) {
case X86_VENDOR_INTEL:
- if (x86 >= 6)
+ if (family >= 6)
reload_ucode_intel();
break;
case X86_VENDOR_AMD:
- if (x86 >= 0x10)
+ if (family >= 0x10)
reload_ucode_amd();
break;
default:
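
core_early.c drops its private x86_vendor()/x86_family() copies; the call sites now rely on shared helpers with the same semantics. For reference, the decode the deleted code performed on CPUID(1).EAX, and which intel_early.c's __x86_family()/x86_model() are expected to match, is reproduced here as a standalone sketch:

#include <stdio.h>
#include <cpuid.h>

/* Same decode as the removed helpers: family in bits 11:8, extended
 * by bits 27:20 when the base family is 0xf; model in bits 7:4,
 * extended by bits 19:16 for families 6 and 0xf. */
static unsigned int sig_family(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;

	if (fam == 0xf)
		fam += (sig >> 20) & 0xff;
	return fam;
}

static unsigned int sig_model(unsigned int sig)
{
	unsigned int fam = sig_family(sig);
	unsigned int model = (sig >> 4) & 0xf;

	if (fam == 0x6 || fam == 0xf)
		model += ((sig >> 16) & 0xf) << 4;
	return model;
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("family 0x%x, model 0x%x\n", sig_family(eax), sig_model(eax));
	return 0;
}
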
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 746e7fd..a41bead 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -124,7 +124,7 @@ static int get_matching_mc(struct microcode_intel *mc_intel, int cpu)
cpf = cpu_sig.pf;
crev = cpu_sig.rev;
- return get_matching_microcode(csig, cpf, mc_intel, crev);
+ return get_matching_microcode(csig, cpf, crev, mc_intel);
}
static int apply_microcode_intel(int cpu)
@@ -226,7 +226,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
csig = uci->cpu_sig.sig;
cpf = uci->cpu_sig.pf;
- if (get_matching_microcode(csig, cpf, mc, new_rev)) {
+ if (get_matching_microcode(csig, cpf, new_rev, mc)) {
vfree(new_mc);
new_rev = mc_header.rev;
new_mc = mc;
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
index 420eb93..2f49ab4 100644
--- a/arch/x86/kernel/cpu/microcode/intel_early.c
+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
@@ -16,6 +16,14 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+
+/*
+ * This needs to be before all headers so that pr_debug in printk.h doesn't turn
+ * printk calls into no_printk().
+ *
+ *#define DEBUG
+ */
+
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -28,6 +36,9 @@
#include <asm/tlbflush.h>
#include <asm/setup.h>
+#undef pr_fmt
+#define pr_fmt(fmt) "microcode: " fmt
+
static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
static struct mc_saved_data {
unsigned int mc_saved_count;
@@ -35,50 +46,45 @@ static struct mc_saved_data {
} mc_saved_data;
static enum ucode_state
-generic_load_microcode_early(struct microcode_intel **mc_saved_p,
- unsigned int mc_saved_count,
- struct ucode_cpu_info *uci)
+load_microcode_early(struct microcode_intel **saved,
+ unsigned int num_saved, struct ucode_cpu_info *uci)
{
struct microcode_intel *ucode_ptr, *new_mc = NULL;
- int new_rev = uci->cpu_sig.rev;
- enum ucode_state state = UCODE_OK;
- unsigned int mc_size;
- struct microcode_header_intel *mc_header;
- unsigned int csig = uci->cpu_sig.sig;
- unsigned int cpf = uci->cpu_sig.pf;
- int i;
+ struct microcode_header_intel *mc_hdr;
+ int new_rev, ret, i;
- for (i = 0; i < mc_saved_count; i++) {
- ucode_ptr = mc_saved_p[i];
+ new_rev = uci->cpu_sig.rev;
- mc_header = (struct microcode_header_intel *)ucode_ptr;
- mc_size = get_totalsize(mc_header);
- if (get_matching_microcode(csig, cpf, ucode_ptr, new_rev)) {
- new_rev = mc_header->rev;
- new_mc = ucode_ptr;
- }
- }
+ for (i = 0; i < num_saved; i++) {
+ ucode_ptr = saved[i];
+ mc_hdr = (struct microcode_header_intel *)ucode_ptr;
- if (!new_mc) {
- state = UCODE_NFOUND;
- goto out;
+ ret = get_matching_microcode(uci->cpu_sig.sig,
+ uci->cpu_sig.pf,
+ new_rev,
+ ucode_ptr);
+ if (!ret)
+ continue;
+
+ new_rev = mc_hdr->rev;
+ new_mc = ucode_ptr;
}
+ if (!new_mc)
+ return UCODE_NFOUND;
+
uci->mc = (struct microcode_intel *)new_mc;
-out:
- return state;
+ return UCODE_OK;
}
-static void
-microcode_pointer(struct microcode_intel **mc_saved,
- unsigned long *mc_saved_in_initrd,
- unsigned long initrd_start, int mc_saved_count)
+static inline void
+copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
+ unsigned long off, int num_saved)
{
int i;
- for (i = 0; i < mc_saved_count; i++)
- mc_saved[i] = (struct microcode_intel *)
- (mc_saved_in_initrd[i] + initrd_start);
+ for (i = 0; i < num_saved; i++)
+ mc_saved[i] = (struct microcode_intel *)(initrd[i] + off);
}
#ifdef CONFIG_X86_32
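
copy_initrd_ptrs() makes explicit what microcode_pointer() did: the early loader records initrd-relative offsets (pointers taken at scan time are useless once the initrd moves or paging changes) and rebuilds usable pointers by adding the current base. A standalone model of the fixup:

#include <stdint.h>

/* saved_off[] holds offsets recorded at scan time; rebuilding the
 * pointers is just offset + current base, as in copy_initrd_ptrs(). */
static void rebuild_ptrs(void **out, const unsigned long *saved_off,
			 uintptr_t base, int num_saved)
{
	for (int i = 0; i < num_saved; i++)
		out[i] = (void *)(saved_off[i] + base);
}
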
@@ -102,55 +108,27 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
#endif
static enum ucode_state
-load_microcode(struct mc_saved_data *mc_saved_data,
- unsigned long *mc_saved_in_initrd,
- unsigned long initrd_start,
- struct ucode_cpu_info *uci)
+load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
+ unsigned long initrd_start, struct ucode_cpu_info *uci)
{
struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
unsigned int count = mc_saved_data->mc_saved_count;
if (!mc_saved_data->mc_saved) {
- microcode_pointer(mc_saved_tmp, mc_saved_in_initrd,
- initrd_start, count);
+ copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);
- return generic_load_microcode_early(mc_saved_tmp, count, uci);
+ return load_microcode_early(mc_saved_tmp, count, uci);
} else {
#ifdef CONFIG_X86_32
microcode_phys(mc_saved_tmp, mc_saved_data);
- return generic_load_microcode_early(mc_saved_tmp, count, uci);
+ return load_microcode_early(mc_saved_tmp, count, uci);
#else
- return generic_load_microcode_early(mc_saved_data->mc_saved,
+ return load_microcode_early(mc_saved_data->mc_saved,
count, uci);
#endif
}
}
-static u8 get_x86_family(unsigned long sig)
-{
- u8 x86;
-
- x86 = (sig >> 8) & 0xf;
-
- if (x86 == 0xf)
- x86 += (sig >> 20) & 0xff;
-
- return x86;
-}
-
-static u8 get_x86_model(unsigned long sig)
-{
- u8 x86, x86_model;
-
- x86 = get_x86_family(sig);
- x86_model = (sig >> 4) & 0xf;
-
- if (x86 == 0x6 || x86 == 0xf)
- x86_model += ((sig >> 16) & 0xf) << 4;
-
- return x86_model;
-}
-
/*
* Given CPU signature and a microcode patch, this function finds if the
* microcode patch has matching family and model with the CPU.
@@ -159,42 +137,40 @@ static enum ucode_state
matching_model_microcode(struct microcode_header_intel *mc_header,
unsigned long sig)
{
- u8 x86, x86_model;
- u8 x86_ucode, x86_model_ucode;
+ unsigned int fam, model;
+ unsigned int fam_ucode, model_ucode;
struct extended_sigtable *ext_header;
unsigned long total_size = get_totalsize(mc_header);
unsigned long data_size = get_datasize(mc_header);
int ext_sigcount, i;
struct extended_signature *ext_sig;
- x86 = get_x86_family(sig);
- x86_model = get_x86_model(sig);
+ fam = __x86_family(sig);
+ model = x86_model(sig);
- x86_ucode = get_x86_family(mc_header->sig);
- x86_model_ucode = get_x86_model(mc_header->sig);
+ fam_ucode = __x86_family(mc_header->sig);
+ model_ucode = x86_model(mc_header->sig);
- if (x86 == x86_ucode && x86_model == x86_model_ucode)
+ if (fam == fam_ucode && model == model_ucode)
return UCODE_OK;
/* Look for ext. headers: */
if (total_size <= data_size + MC_HEADER_SIZE)
return UCODE_NFOUND;
- ext_header = (struct extended_sigtable *)
- mc_header + data_size + MC_HEADER_SIZE;
+ ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
+ ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
ext_sigcount = ext_header->count;
- ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
for (i = 0; i < ext_sigcount; i++) {
- x86_ucode = get_x86_family(ext_sig->sig);
- x86_model_ucode = get_x86_model(ext_sig->sig);
+ fam_ucode = __x86_family(ext_sig->sig);
+ model_ucode = x86_model(ext_sig->sig);
- if (x86 == x86_ucode && x86_model == x86_model_ucode)
+ if (fam == fam_ucode && model == model_ucode)
return UCODE_OK;
ext_sig++;
}
-
return UCODE_NFOUND;
}
@@ -204,7 +180,7 @@ save_microcode(struct mc_saved_data *mc_saved_data,
unsigned int mc_saved_count)
{
int i, j;
- struct microcode_intel **mc_saved_p;
+ struct microcode_intel **saved_ptr;
int ret;
if (!mc_saved_count)
@@ -213,39 +189,45 @@ save_microcode(struct mc_saved_data *mc_saved_data,
/*
* Copy new microcode data.
*/
- mc_saved_p = kmalloc(mc_saved_count*sizeof(struct microcode_intel *),
- GFP_KERNEL);
- if (!mc_saved_p)
+ saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);
+ if (!saved_ptr)
return -ENOMEM;
for (i = 0; i < mc_saved_count; i++) {
- struct microcode_intel *mc = mc_saved_src[i];
- struct microcode_header_intel *mc_header = &mc->hdr;
- unsigned long mc_size = get_totalsize(mc_header);
- mc_saved_p[i] = kmalloc(mc_size, GFP_KERNEL);
- if (!mc_saved_p[i]) {
- ret = -ENOMEM;
- goto err;
- }
+ struct microcode_header_intel *mc_hdr;
+ struct microcode_intel *mc;
+ unsigned long size;
+
if (!mc_saved_src[i]) {
ret = -EINVAL;
goto err;
}
- memcpy(mc_saved_p[i], mc, mc_size);
+
+ mc = mc_saved_src[i];
+ mc_hdr = &mc->hdr;
+ size = get_totalsize(mc_hdr);
+
+ saved_ptr[i] = kmalloc(size, GFP_KERNEL);
+ if (!saved_ptr[i]) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memcpy(saved_ptr[i], mc, size);
}
/*
* Point to newly saved microcode.
*/
- mc_saved_data->mc_saved = mc_saved_p;
+ mc_saved_data->mc_saved = saved_ptr;
mc_saved_data->mc_saved_count = mc_saved_count;
return 0;
err:
for (j = 0; j <= i; j++)
- kfree(mc_saved_p[j]);
- kfree(mc_saved_p);
+ kfree(saved_ptr[j]);
+ kfree(saved_ptr);
return ret;
}
@@ -257,48 +239,45 @@ err:
* - or if it is a newly discovered microcode patch.
*
* The microcode patch should have matching model with CPU.
+ *
+ * Returns: The updated number @num_saved of saved microcode patches.
*/
-static void _save_mc(struct microcode_intel **mc_saved, u8 *ucode_ptr,
- unsigned int *mc_saved_count_p)
+static unsigned int _save_mc(struct microcode_intel **mc_saved,
+ u8 *ucode_ptr, unsigned int num_saved)
{
- int i;
- int found = 0;
- unsigned int mc_saved_count = *mc_saved_count_p;
- struct microcode_header_intel *mc_header;
+ struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
+ unsigned int sig, pf, new_rev;
+ int found = 0, i;
+
+ mc_hdr = (struct microcode_header_intel *)ucode_ptr;
+
+ for (i = 0; i < num_saved; i++) {
+ mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
+ sig = mc_saved_hdr->sig;
+ pf = mc_saved_hdr->pf;
+ new_rev = mc_hdr->rev;
+
+ if (!get_matching_sig(sig, pf, new_rev, ucode_ptr))
+ continue;
+
+ found = 1;
+
+ if (!revision_is_newer(mc_hdr, new_rev))
+ continue;
- mc_header = (struct microcode_header_intel *)ucode_ptr;
- for (i = 0; i < mc_saved_count; i++) {
- unsigned int sig, pf;
- unsigned int new_rev;
- struct microcode_header_intel *mc_saved_header =
- (struct microcode_header_intel *)mc_saved[i];
- sig = mc_saved_header->sig;
- pf = mc_saved_header->pf;
- new_rev = mc_header->rev;
-
- if (get_matching_sig(sig, pf, ucode_ptr, new_rev)) {
- found = 1;
- if (update_match_revision(mc_header, new_rev)) {
- /*
- * Found an older ucode saved before.
- * Replace the older one with this newer
- * one.
- */
- mc_saved[i] =
- (struct microcode_intel *)ucode_ptr;
- break;
- }
- }
- }
- if (i >= mc_saved_count && !found)
/*
- * This ucode is first time discovered in ucode file.
- * Save it to memory.
+ * Found an older ucode saved earlier. Replace it with
+ * this newer one.
*/
- mc_saved[mc_saved_count++] =
- (struct microcode_intel *)ucode_ptr;
+ mc_saved[i] = (struct microcode_intel *)ucode_ptr;
+ break;
+ }
+
+ /* Newly detected microcode, save it to memory. */
+ if (i >= num_saved && !found)
+ mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;
- *mc_saved_count_p = mc_saved_count;
+ return num_saved;
}
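
_save_mc() now returns the updated count instead of writing through a pointer, and the control flow is flattened: walk the saved patches, and on a signature match either replace the slot when the candidate is newer or keep the old copy; only a candidate that matched nothing gets appended. A model of that replace-or-append loop — the revision test below is written against the saved copy's revision, which is what the "replace it with this newer one" comment describes, whereas the kernel routes the check through get_matching_sig()/revision_is_newer():

/* sig_matches-style comparison inlined for brevity; struct is ours. */
struct patch { unsigned int sig, pf, rev; };

static unsigned int save_patch(struct patch **saved, struct patch *cand,
			       unsigned int num_saved)
{
	unsigned int i;
	int found = 0;

	for (i = 0; i < num_saved; i++) {
		if (saved[i]->sig != cand->sig || saved[i]->pf != cand->pf)
			continue;		/* different CPU signature */

		found = 1;
		if (cand->rev <= saved[i]->rev)
			continue;		/* not newer, keep old copy */

		saved[i] = cand;		/* replace with newer revision */
		break;
	}

	if (i >= num_saved && !found)
		saved[num_saved++] = cand;	/* first sighting: append */

	return num_saved;
}
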
/*
@@ -346,7 +325,7 @@ get_matching_model_microcode(int cpu, unsigned long start,
continue;
}
- _save_mc(mc_saved_tmp, ucode_ptr, &mc_saved_count);
+ mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);
ucode_ptr += mc_size;
}
@@ -372,7 +351,7 @@ out:
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
unsigned int val[2];
- u8 x86, x86_model;
+ unsigned int family, model;
struct cpu_signature csig;
unsigned int eax, ebx, ecx, edx;
@@ -387,10 +366,10 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
native_cpuid(&eax, &ebx, &ecx, &edx);
csig.sig = eax;
- x86 = get_x86_family(csig.sig);
- x86_model = get_x86_model(csig.sig);
+ family = __x86_family(csig.sig);
+ model = x86_model(csig.sig);
- if ((x86_model >= 5) || (x86 > 6)) {
+ if ((model >= 5) || (family > 6)) {
/* get processor flags from MSR 0x17 */
native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
csig.pf = 1 << ((val[1] >> 18) & 7);
@@ -429,8 +408,7 @@ static void __ref show_saved_mc(void)
sig = uci.cpu_sig.sig;
pf = uci.cpu_sig.pf;
rev = uci.cpu_sig.rev;
- pr_debug("CPU%d: sig=0x%x, pf=0x%x, rev=0x%x\n",
- smp_processor_id(), sig, pf, rev);
+ pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
struct microcode_header_intel *mc_saved_header;
@@ -457,8 +435,7 @@ static void __ref show_saved_mc(void)
if (total_size <= data_size + MC_HEADER_SIZE)
continue;
- ext_header = (struct extended_sigtable *)
- mc_saved_header + data_size + MC_HEADER_SIZE;
+ ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
ext_sigcount = ext_header->count;
ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
@@ -515,8 +492,7 @@ int save_mc_for_early(u8 *mc)
* Save the microcode patch mc in mc_save_tmp structure if it's a newer
* version.
*/
-
- _save_mc(mc_saved_tmp, mc, &mc_saved_count);
+ mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);
/*
* Save the mc_save_tmp in global mc_saved_data.
@@ -548,12 +524,10 @@ EXPORT_SYMBOL_GPL(save_mc_for_early);
static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
static __init enum ucode_state
-scan_microcode(unsigned long start, unsigned long end,
- struct mc_saved_data *mc_saved_data,
- unsigned long *mc_saved_in_initrd,
- struct ucode_cpu_info *uci)
+scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
+ unsigned long start, unsigned long size,
+ struct ucode_cpu_info *uci)
{
- unsigned int size = end - start + 1;
struct cpio_data cd;
long offset = 0;
#ifdef CONFIG_X86_32
@@ -569,10 +543,8 @@ scan_microcode(unsigned long start, unsigned long end,
if (!cd.data)
return UCODE_ERROR;
-
return get_matching_model_microcode(0, start, cd.data, cd.size,
- mc_saved_data, mc_saved_in_initrd,
- uci);
+ mc_saved_data, initrd, uci);
}
/*
@@ -704,7 +676,7 @@ int __init save_microcode_in_initrd_intel(void)
if (count == 0)
return ret;
- microcode_pointer(mc_saved, mc_saved_in_initrd, initrd_start, count);
+ copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
ret = save_microcode(&mc_saved_data, mc_saved, count);
if (ret)
pr_err("Cannot save microcode patches from initrd.\n");
@@ -716,52 +688,44 @@ int __init save_microcode_in_initrd_intel(void)
static void __init
_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
- unsigned long *mc_saved_in_initrd,
- unsigned long initrd_start_early,
- unsigned long initrd_end_early,
- struct ucode_cpu_info *uci)
+ unsigned long *initrd,
+ unsigned long start, unsigned long size)
{
+ struct ucode_cpu_info uci;
enum ucode_state ret;
- collect_cpu_info_early(uci);
- scan_microcode(initrd_start_early, initrd_end_early, mc_saved_data,
- mc_saved_in_initrd, uci);
+ collect_cpu_info_early(&uci);
- ret = load_microcode(mc_saved_data, mc_saved_in_initrd,
- initrd_start_early, uci);
+ ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);
+ if (ret != UCODE_OK)
+ return;
- if (ret == UCODE_OK)
- apply_microcode_early(uci, true);
+ ret = load_microcode(mc_saved_data, initrd, start, &uci);
+ if (ret != UCODE_OK)
+ return;
+
+ apply_microcode_early(&uci, true);
}
-void __init
-load_ucode_intel_bsp(void)
+void __init load_ucode_intel_bsp(void)
{
- u64 ramdisk_image, ramdisk_size;
- unsigned long initrd_start_early, initrd_end_early;
- struct ucode_cpu_info uci;
+ u64 start, size;
#ifdef CONFIG_X86_32
- struct boot_params *boot_params_p;
+ struct boot_params *p;
- boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
- ramdisk_image = boot_params_p->hdr.ramdisk_image;
- ramdisk_size = boot_params_p->hdr.ramdisk_size;
- initrd_start_early = ramdisk_image;
- initrd_end_early = initrd_start_early + ramdisk_size;
+ p = (struct boot_params *)__pa_nodebug(&boot_params);
+ start = p->hdr.ramdisk_image;
+ size = p->hdr.ramdisk_size;
_load_ucode_intel_bsp(
- (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
- (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
- initrd_start_early, initrd_end_early, &uci);
+ (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+ (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
+ start, size);
#else
- ramdisk_image = boot_params.hdr.ramdisk_image;
- ramdisk_size = boot_params.hdr.ramdisk_size;
- initrd_start_early = ramdisk_image + PAGE_OFFSET;
- initrd_end_early = initrd_start_early + ramdisk_size;
-
- _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd,
- initrd_start_early, initrd_end_early,
- &uci);
+ start = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
+ size = boot_params.hdr.ramdisk_size;
+
+ _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
#endif
}
@@ -771,6 +735,7 @@ void load_ucode_intel_ap(void)
struct ucode_cpu_info uci;
unsigned long *mc_saved_in_initrd_p;
unsigned long initrd_start_addr;
+ enum ucode_state ret;
#ifdef CONFIG_X86_32
unsigned long *initrd_start_p;
@@ -793,8 +758,12 @@ void load_ucode_intel_ap(void)
return;
collect_cpu_info_early(&uci);
- load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
- initrd_start_addr, &uci);
+ ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
+ initrd_start_addr, &uci);
+
+ if (ret != UCODE_OK)
+ return;
+
apply_microcode_early(&uci, true);
}
@@ -808,8 +777,8 @@ void reload_ucode_intel(void)
collect_cpu_info_early(&uci);
- ret = generic_load_microcode_early(mc_saved_data.mc_saved,
- mc_saved_data.mc_saved_count, &uci);
+ ret = load_microcode_early(mc_saved_data.mc_saved,
+ mc_saved_data.mc_saved_count, &uci);
if (ret != UCODE_OK)
return;
diff --git a/arch/x86/kernel/cpu/microcode/intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c
index ce69320..cd47a51 100644
--- a/arch/x86/kernel/cpu/microcode/intel_lib.c
+++ b/arch/x86/kernel/cpu/microcode/intel_lib.c
@@ -38,12 +38,6 @@ update_match_cpu(unsigned int csig, unsigned int cpf,
return (!sigmatch(sig, csig, pf, cpf)) ? 0 : 1;
}
-int
-update_match_revision(struct microcode_header_intel *mc_header, int rev)
-{
- return (mc_header->rev <= rev) ? 0 : 1;
-}
-
int microcode_sanity_check(void *mc, int print_err)
{
unsigned long total_size, data_size, ext_table_size;
@@ -128,10 +122,9 @@ int microcode_sanity_check(void *mc, int print_err)
EXPORT_SYMBOL_GPL(microcode_sanity_check);
/*
- * return 0 - no update found
- * return 1 - found update
+ * Returns 1 if update has been found, 0 otherwise.
*/
-int get_matching_sig(unsigned int csig, int cpf, void *mc, int rev)
+int get_matching_sig(unsigned int csig, int cpf, int rev, void *mc)
{
struct microcode_header_intel *mc_header = mc;
struct extended_sigtable *ext_header;
@@ -159,16 +152,15 @@ int get_matching_sig(unsigned int csig, int cpf, void *mc, int rev)
}
/*
- * return 0 - no update found
- * return 1 - found update
+ * Returns 1 if update has been found, 0 otherwise.
*/
-int get_matching_microcode(unsigned int csig, int cpf, void *mc, int rev)
+int get_matching_microcode(unsigned int csig, int cpf, int rev, void *mc)
{
- struct microcode_header_intel *mc_header = mc;
+ struct microcode_header_intel *mc_hdr = mc;
- if (!update_match_revision(mc_header, rev))
+ if (!revision_is_newer(mc_hdr, rev))
return 0;
- return get_matching_sig(csig, cpf, mc, rev);
+ return get_matching_sig(csig, cpf, rev, mc);
}
EXPORT_SYMBOL_GPL(get_matching_microcode);
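
update_match_revision() is gone from intel_lib.c; the matchers now take the revision before the buffer pointer and delegate the "is this newer?" test to revision_is_newer(), presumably an inline helper moved to a header. A sketch consistent with the removed body (the struct is abbreviated and the placement is an assumption):

struct microcode_header_intel {
	unsigned int hdrver;
	unsigned int rev;
	/* ... remaining header fields elided ... */
};

/* Assumed shape of revision_is_newer(): the same predicate the
 * removed update_match_revision() computed, truthy sense unchanged. */
static inline int revision_is_newer(struct microcode_header_intel *mc_hdr,
				    int rev)
{
	return (mc_hdr->rev <= rev) ? 0 : 1;
}
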
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index 36d99a3..3f20710 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -6,7 +6,7 @@
IN=$1
OUT=$2
-function dump_array()
+dump_array()
{
ARRAY=$1
SIZE=$2
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b71a7f8..87848eb 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -263,6 +263,14 @@ static void hw_perf_event_destroy(struct perf_event *event)
}
}
+void hw_perf_lbr_event_destroy(struct perf_event *event)
+{
+ hw_perf_event_destroy(event);
+
+ /* undo the lbr/bts event accounting */
+ x86_del_exclusive(x86_lbr_exclusive_lbr);
+}
+
static inline int x86_pmu_initialized(void)
{
return x86_pmu.handle_irq != NULL;
@@ -302,6 +310,35 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
return x86_pmu_extra_regs(val, event);
}
+/*
+ * Check if we can create event of a certain type (that no conflicting events
+ * are present).
+ */
+int x86_add_exclusive(unsigned int what)
+{
+ int ret = -EBUSY, i;
+
+ if (atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what]))
+ return 0;
+
+ mutex_lock(&pmc_reserve_mutex);
+ for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++)
+ if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
+ goto out;
+
+ atomic_inc(&x86_pmu.lbr_exclusive[what]);
+ ret = 0;
+
+out:
+ mutex_unlock(&pmc_reserve_mutex);
+ return ret;
+}
+
+void x86_del_exclusive(unsigned int what)
+{
+ atomic_dec(&x86_pmu.lbr_exclusive[what]);
+}
+
int x86_setup_perfctr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
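
x86_add_exclusive() claims one of the mutually exclusive LBR/BTS/PT facilities: a lock-free fast path bumps the refcount if the facility is already in use, otherwise it takes the mutex, verifies no other facility holds a reference, and increments. A self-contained user-space model with C11 atomics; the kernel uses atomic_t and pmc_reserve_mutex, and all names below are ours:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

enum { EXCL_LBR, EXCL_BTS, EXCL_PT, EXCL_MAX };

static atomic_int excl_cnt[EXCL_MAX];
static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;

/* atomic_inc_not_zero() equivalent: increment only if already nonzero. */
static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
	return false;
}

static int add_exclusive(int what)
{
	int ret = -1, i;

	if (inc_not_zero(&excl_cnt[what]))
		return 0;			/* fast path: already ours */

	pthread_mutex_lock(&reserve_lock);
	for (i = 0; i < EXCL_MAX; i++)
		if (i != what && atomic_load(&excl_cnt[i]))
			goto out;		/* conflicting facility active */

	atomic_fetch_add(&excl_cnt[what], 1);
	ret = 0;
out:
	pthread_mutex_unlock(&reserve_lock);
	return ret;
}

static void del_exclusive(int what)
{
	atomic_fetch_sub(&excl_cnt[what], 1);
}
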
@@ -346,6 +383,12 @@ int x86_setup_perfctr(struct perf_event *event)
/* BTS is currently only allowed for user-mode. */
if (!attr->exclude_kernel)
return -EOPNOTSUPP;
+
+ /* disallow bts if conflicting events are present */
+ if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+ return -EBUSY;
+
+ event->destroy = hw_perf_lbr_event_destroy;
}
hwc->config |= config;
@@ -399,39 +442,41 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
- /*
- * check that PEBS LBR correction does not conflict with
- * whatever the user is asking with attr->branch_sample_type
- */
- if (event->attr.precise_ip > 1 &&
- x86_pmu.intel_cap.pebs_format < 2) {
- u64 *br_type = &event->attr.branch_sample_type;
-
- if (has_branch_stack(event)) {
- if (!precise_br_compat(event))
- return -EOPNOTSUPP;
-
- /* branch_sample_type is compatible */
-
- } else {
- /*
- * user did not specify branch_sample_type
- *
- * For PEBS fixups, we capture all
- * the branches at the priv level of the
- * event.
- */
- *br_type = PERF_SAMPLE_BRANCH_ANY;
-
- if (!event->attr.exclude_user)
- *br_type |= PERF_SAMPLE_BRANCH_USER;
-
- if (!event->attr.exclude_kernel)
- *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
- }
+ }
+ /*
+ * check that PEBS LBR correction does not conflict with
+ * whatever the user is asking with attr->branch_sample_type
+ */
+ if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
+ u64 *br_type = &event->attr.branch_sample_type;
+
+ if (has_branch_stack(event)) {
+ if (!precise_br_compat(event))
+ return -EOPNOTSUPP;
+
+ /* branch_sample_type is compatible */
+
+ } else {
+ /*
+ * user did not specify branch_sample_type
+ *
+ * For PEBS fixups, we capture all
+ * the branches at the priv level of the
+ * event.
+ */
+ *br_type = PERF_SAMPLE_BRANCH_ANY;
+
+ if (!event->attr.exclude_user)
+ *br_type |= PERF_SAMPLE_BRANCH_USER;
+
+ if (!event->attr.exclude_kernel)
+ *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
}
}
+ if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
+ event->attach_state |= PERF_ATTACH_TASK_DATA;
+
/*
* Generate PMC IRQs:
* (keep 'enabled' bit clear for now)
@@ -449,6 +494,12 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == PERF_TYPE_RAW)
event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+ if (event->attr.sample_period && x86_pmu.limit_period) {
+ if (x86_pmu.limit_period(event, event->attr.sample_period) >
+ event->attr.sample_period)
+ return -EINVAL;
+ }
+
return x86_setup_perfctr(event);
}
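
The new limit_period check rejects a fixed sample period the PMU would silently have to raise: the callback may round a period up to the hardware minimum, and if that rounding changes the user's explicit request, event creation fails rather than lying about the period. A model of the contract, with an assumed minimum:

/* limit_period contract: the callback may only round up. A user-fixed
 * period that would be rounded cannot be honored exactly => -EINVAL. */
static unsigned int limit_period_example(unsigned int requested)
{
	const unsigned int hw_min = 128;	/* assumed PMU minimum */

	return requested < hw_min ? hw_min : requested;
}

static int validate_period(unsigned int requested)
{
	return limit_period_example(requested) > requested ? -1 : 0;
}
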
@@ -728,14 +779,17 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
struct event_constraint *c;
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
struct perf_event *e;
- int i, wmin, wmax, num = 0;
+ int i, wmin, wmax, unsched = 0;
struct hw_perf_event *hwc;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+ if (x86_pmu.start_scheduling)
+ x86_pmu.start_scheduling(cpuc);
+
for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
hwc = &cpuc->event_list[i]->hw;
- c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
+ c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
hwc->constraint = c;
wmin = min(wmin, c->weight);
@@ -768,24 +822,30 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
/* slow path */
if (i != n)
- num = perf_assign_events(cpuc->event_list, n, wmin,
- wmax, assign);
+ unsched = perf_assign_events(cpuc->event_list, n, wmin,
+ wmax, assign);
/*
- * Mark the event as committed, so we do not put_constraint()
- * in case new events are added and fail scheduling.
+ * In case of success (unsched = 0), mark events as committed,
+ * so we do not put_constraint() in case new events are added
+ * and fail to be scheduled
+ *
+ * We invoke the lower level commit callback to lock the resource
+ *
+ * We do not need to do all of this in case we are called to
+ * validate an event group (assign == NULL)
*/
- if (!num && assign) {
+ if (!unsched && assign) {
for (i = 0; i < n; i++) {
e = cpuc->event_list[i];
e->hw.flags |= PERF_X86_EVENT_COMMITTED;
+ if (x86_pmu.commit_scheduling)
+ x86_pmu.commit_scheduling(cpuc, e, assign[i]);
}
}
- /*
- * scheduling failed or is just a simulation,
- * free resources if necessary
- */
- if (!assign || num) {
+
+ if (!assign || unsched) {
+
for (i = 0; i < n; i++) {
e = cpuc->event_list[i];
/*
@@ -795,11 +855,18 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
continue;
+ /*
+ * release events that failed scheduling
+ */
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, e);
}
}
- return num ? -EINVAL : 0;
+
+ if (x86_pmu.stop_scheduling)
+ x86_pmu.stop_scheduling(cpuc);
+
+ return unsched ? -EINVAL : 0;
}
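
x86_schedule_events() now brackets constraint scheduling with three optional callbacks: start_scheduling() before constraints are collected, commit_scheduling() per event once assignment succeeds, and stop_scheduling() on the way out. Each call is guarded, so PMUs without HT-exclusive counters pay nothing. The pattern, reduced to its shape (the struct stands in for the relevant x86_pmu members):

struct sched_hooks {
	void (*start)(void *cpuc);
	void (*commit)(void *cpuc, void *event, int cntr);
	void (*stop)(void *cpuc);
};

static int schedule_events(struct sched_hooks *h, void *cpuc,
			   void **events, int *assign, int n, int unsched)
{
	if (h->start)
		h->start(cpuc);

	/* ... constraint collection and assignment elided ... */

	if (!unsched && assign)			/* success: lock resources in */
		for (int i = 0; i < n; i++)
			if (h->commit)
				h->commit(cpuc, events[i], assign[i]);

	if (h->stop)
		h->stop(cpuc);

	return unsched ? -1 : 0;
}
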
/*
@@ -986,6 +1053,9 @@ int x86_perf_event_set_period(struct perf_event *event)
if (left > x86_pmu.max_period)
left = x86_pmu.max_period;
+ if (x86_pmu.limit_period)
+ left = x86_pmu.limit_period(event, left);
+
per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
/*
@@ -1033,7 +1103,6 @@ static int x86_pmu_add(struct perf_event *event, int flags)
hwc = &event->hw;
- perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
ret = n = collect_events(cpuc, event, false);
if (ret < 0)
@@ -1071,7 +1140,6 @@ done_collect:
ret = 0;
out:
- perf_pmu_enable(event->pmu);
return ret;
}
@@ -1103,7 +1171,7 @@ static void x86_pmu_start(struct perf_event *event, int flags)
void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
- u64 pebs;
+ u64 pebs, debugctl;
struct cpu_hw_events *cpuc;
unsigned long flags;
int cpu, idx;
@@ -1121,14 +1189,20 @@ void perf_event_print_debug(void)
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
- rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
pr_info("\n");
pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
pr_info("CPU#%d: status: %016llx\n", cpu, status);
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
- pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
+ if (x86_pmu.pebs_constraints) {
+ rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
+ pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
+ }
+ if (x86_pmu.lbr_nr) {
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ pr_info("CPU#%d: debugctl: %016llx\n", cpu, debugctl);
+ }
}
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
@@ -1321,11 +1395,12 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- int ret = NOTIFY_OK;
+ int i, ret = NOTIFY_OK;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- cpuc->kfree_on_online = NULL;
+ for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
+ cpuc->kfree_on_online[i] = NULL;
if (x86_pmu.cpu_prepare)
ret = x86_pmu.cpu_prepare(cpu);
break;
@@ -1336,7 +1411,10 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
break;
case CPU_ONLINE:
- kfree(cpuc->kfree_on_online);
+ for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
+ kfree(cpuc->kfree_on_online[i]);
+ cpuc->kfree_on_online[i] = NULL;
+ }
break;
case CPU_DYING:
@@ -1712,7 +1790,7 @@ static int validate_event(struct perf_event *event)
if (IS_ERR(fake_cpuc))
return PTR_ERR(fake_cpuc);
- c = x86_pmu.get_event_constraints(fake_cpuc, event);
+ c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
if (!c || !c->weight)
ret = -EINVAL;
@@ -1914,10 +1992,10 @@ static const struct attribute_group *x86_pmu_attr_groups[] = {
NULL,
};
-static void x86_pmu_flush_branch_stack(void)
+static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
- if (x86_pmu.flush_branch_stack)
- x86_pmu.flush_branch_stack();
+ if (x86_pmu.sched_task)
+ x86_pmu.sched_task(ctx, sched_in);
}
void perf_check_microcode(void)
@@ -1949,7 +2027,8 @@ static struct pmu pmu = {
.commit_txn = x86_pmu_commit_txn,
.event_idx = x86_pmu_event_idx,
- .flush_branch_stack = x86_pmu_flush_branch_stack,
+ .sched_task = x86_pmu_sched_task,
+ .task_ctx_size = sizeof(struct x86_perf_task_context),
};
void arch_perf_update_userpage(struct perf_event *event,
@@ -1968,13 +2047,23 @@ void arch_perf_update_userpage(struct perf_event *event,
data = cyc2ns_read_begin();
+ /*
+ * Internal timekeeping for enabled/running/stopped times
+ * is always in the local_clock domain.
+ */
userpg->cap_user_time = 1;
userpg->time_mult = data->cyc2ns_mul;
userpg->time_shift = data->cyc2ns_shift;
userpg->time_offset = data->cyc2ns_offset - now;
- userpg->cap_user_time_zero = 1;
- userpg->time_zero = data->cyc2ns_offset;
+ /*
+ * cap_user_time_zero doesn't make sense when we're using a different
+ * time base for the records.
+ */
+ if (event->clock == &local_clock) {
+ userpg->cap_user_time_zero = 1;
+ userpg->time_zero = data->cyc2ns_offset;
+ }
cyc2ns_read_end(data);
}
@@ -2147,24 +2236,24 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
static unsigned long code_segment_base(struct pt_regs *regs)
{
/*
+ * For IA32 we look at the GDT/LDT segment base to convert the
+ * effective IP to a linear address.
+ */
+
+#ifdef CONFIG_X86_32
+ /*
* If we are in VM86 mode, add the segment offset to convert to a
* linear address.
*/
if (regs->flags & X86_VM_MASK)
return 0x10 * regs->cs;
- /*
- * For IA32 we look at the GDT/LDT segment base to convert the
- * effective IP to a linear address.
- */
-#ifdef CONFIG_X86_32
if (user_mode(regs) && regs->cs != __USER_CS)
return get_segment_base(regs->cs);
#else
- if (test_thread_flag(TIF_IA32)) {
- if (user_mode(regs) && regs->cs != __USER32_CS)
- return get_segment_base(regs->cs);
- }
+ if (user_mode(regs) && !user_64bit_mode(regs) &&
+ regs->cs != __USER32_CS)
+ return get_segment_base(regs->cs);
#endif
return 0;
}
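
code_segment_base() is now compiled out of the 64-bit build except for the compat check, and the 64-bit path tests user_64bit_mode() instead of the per-thread TIF_IA32 flag, which misclassifies a process that switches bitness via a far transfer. In VM86 mode the segment base is the selector shifted left by four, which is exactly what 0x10 * regs->cs computes:

#include <stdint.h>

/* Real-mode/VM86 segmentation: linear = (segment << 4) + offset. */
static uint32_t vm86_linear_ip(uint16_t cs, uint16_t ip)
{
	return ((uint32_t)cs << 4) + ip;
}
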
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index df525d2..329f035 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -71,6 +71,8 @@ struct event_constraint {
#define PERF_X86_EVENT_COMMITTED 0x8 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW 0x10 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW 0x20 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_EXCL 0x40 /* HT exclusivity on counter */
+#define PERF_X86_EVENT_DYNAMIC 0x80 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED 0x40 /* grant rdpmc permission */
@@ -123,8 +125,37 @@ struct intel_shared_regs {
unsigned core_id; /* per-core: core id */
};
+enum intel_excl_state_type {
+ INTEL_EXCL_UNUSED = 0, /* counter is unused */
+ INTEL_EXCL_SHARED = 1, /* counter can be used by both threads */
+ INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
+};
+
+struct intel_excl_states {
+ enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
+ enum intel_excl_state_type state[X86_PMC_IDX_MAX];
+ int num_alloc_cntrs;/* #counters allocated */
+ int max_alloc_cntrs;/* max #counters allowed */
+ bool sched_started; /* true if scheduling has started */
+};
+
+struct intel_excl_cntrs {
+ raw_spinlock_t lock;
+
+ struct intel_excl_states states[2];
+
+ int refcnt; /* per-core: #HT threads */
+ unsigned core_id; /* per-core: core id */
+};
+
#define MAX_LBR_ENTRIES 16
+enum {
+ X86_PERF_KFREE_SHARED = 0,
+ X86_PERF_KFREE_EXCL = 1,
+ X86_PERF_KFREE_MAX
+};
+
struct cpu_hw_events {
/*
* Generic x86 PMC bits
@@ -179,6 +210,12 @@ struct cpu_hw_events {
* used on Intel NHM/WSM/SNB
*/
struct intel_shared_regs *shared_regs;
+ /*
+ * manage exclusive counter access between hyperthread
+ */
+ struct event_constraint *constraint_list; /* in enable order */
+ struct intel_excl_cntrs *excl_cntrs;
+ int excl_thread_id; /* 0 or 1 */
/*
* AMD specific bits
@@ -187,7 +224,7 @@ struct cpu_hw_events {
/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
u64 perf_ctr_virt_mask;
- void *kfree_on_online;
+ void *kfree_on_online[X86_PERF_KFREE_MAX];
};
#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
@@ -202,6 +239,10 @@ struct cpu_hw_events {
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
+#define INTEL_EXCLEVT_CONSTRAINT(c, n) \
+ __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
+ 0, PERF_X86_EVENT_EXCL)
+
/*
* The overlap flag marks event constraints with overlapping counter
* masks. This is the case if the counter mask of such an event is not
@@ -259,6 +300,10 @@ struct cpu_hw_events {
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+#define INTEL_EXCLUEVT_CONSTRAINT(c, n) \
+ __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)
+
#define INTEL_PLD_CONSTRAINT(c, n) \
__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
@@ -283,22 +328,40 @@ struct cpu_hw_events {
/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
- __EVENT_CONSTRAINT(code, n, \
+ __EVENT_CONSTRAINT(code, n, \
ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
+ __EVENT_CONSTRAINT(code, n, \
+ ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, \
+ PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
+
/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
__EVENT_CONSTRAINT(code, n, \
INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
+#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
+ __EVENT_CONSTRAINT(code, n, \
+ INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, \
+ PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)
+
/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
__EVENT_CONSTRAINT(code, n, \
INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
+ __EVENT_CONSTRAINT(code, n, \
+ INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, \
+ PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
+
/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
__EVENT_CONSTRAINT(code, n, \
@@ -408,6 +471,13 @@ union x86_pmu_config {
#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
+enum {
+ x86_lbr_exclusive_lbr,
+ x86_lbr_exclusive_bts,
+ x86_lbr_exclusive_pt,
+ x86_lbr_exclusive_max,
+};
+
/*
* struct x86_pmu - generic x86 pmu
*/
@@ -443,14 +513,25 @@ struct x86_pmu {
u64 max_period;
struct event_constraint *
(*get_event_constraints)(struct cpu_hw_events *cpuc,
+ int idx,
struct perf_event *event);
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
+
+ void (*commit_scheduling)(struct cpu_hw_events *cpuc,
+ struct perf_event *event,
+ int cntr);
+
+ void (*start_scheduling)(struct cpu_hw_events *cpuc);
+
+ void (*stop_scheduling)(struct cpu_hw_events *cpuc);
+
struct event_constraint *event_constraints;
struct x86_pmu_quirk *quirks;
int perfctr_second_write;
bool late_ack;
+ unsigned (*limit_period)(struct perf_event *event, unsigned l);
/*
* sysfs attrs
@@ -472,7 +553,8 @@ struct x86_pmu {
void (*cpu_dead)(int cpu);
void (*check_microcode)(void);
- void (*flush_branch_stack)(void);
+ void (*sched_task)(struct perf_event_context *ctx,
+ bool sched_in);
/*
* Intel Arch Perfmon v2+
@@ -504,10 +586,15 @@ struct x86_pmu {
bool lbr_double_abort; /* duplicated lbr aborts */
/*
+ * Intel PT/LBR/BTS are exclusive
+ */
+ atomic_t lbr_exclusive[x86_lbr_exclusive_max];
+
+ /*
* Extra registers for events
*/
struct extra_reg *extra_regs;
- unsigned int er_flags;
+ unsigned int flags;
/*
* Intel host/guest support (KVM)
@@ -515,6 +602,13 @@ struct x86_pmu {
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};
+struct x86_perf_task_context {
+ u64 lbr_from[MAX_LBR_ENTRIES];
+ u64 lbr_to[MAX_LBR_ENTRIES];
+ int lbr_callstack_users;
+ int lbr_stack_state;
+};
+
#define x86_add_quirk(func_) \
do { \
static struct x86_pmu_quirk __quirk __initdata = { \
@@ -524,8 +618,13 @@ do { \
x86_pmu.quirks = &__quirk; \
} while (0)
-#define ERF_NO_HT_SHARING 1
-#define ERF_HAS_RSP_1 2
+/*
+ * x86_pmu flags
+ */
+#define PMU_FL_NO_HT_SHARING 0x1 /* no hyper-threading resource sharing */
+#define PMU_FL_HAS_RSP_1 0x2 /* has 2 equivalent offcore_rsp regs */
+#define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
+#define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -546,6 +645,12 @@ static struct perf_pmu_events_attr event_attr_##v = { \
extern struct x86_pmu x86_pmu __read_mostly;
+static inline bool x86_pmu_has_lbr_callstack(void)
+{
+ return x86_pmu.lbr_sel_map &&
+ x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
+}
+
DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
int x86_perf_event_set_period(struct perf_event *event);
@@ -588,6 +693,12 @@ static inline int x86_pmu_rdpmc_index(int index)
return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
+int x86_add_exclusive(unsigned int what);
+
+void x86_del_exclusive(unsigned int what);
+
+void hw_perf_lbr_event_destroy(struct perf_event *event);
+
int x86_setup_perfctr(struct perf_event *event);
int x86_pmu_hw_config(struct perf_event *event);
@@ -674,10 +785,34 @@ static inline int amd_pmu_init(void)
#ifdef CONFIG_CPU_SUP_INTEL
+static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
+{
+ /* user explicitly requested branch sampling */
+ if (has_branch_stack(event))
+ return true;
+
+ /* implicit branch sampling to correct PEBS skid */
+ if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
+ x86_pmu.intel_cap.pebs_format < 2)
+ return true;
+
+ return false;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+ if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+ !event->attr.freq && event->hw.sample_period == 1)
+ return true;
+
+ return false;
+}
+
int intel_pmu_save_and_restart(struct perf_event *event);
struct event_constraint *
-x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);
+x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event);
struct intel_shared_regs *allocate_shared_regs(int cpu);
@@ -727,13 +862,15 @@ void intel_pmu_pebs_disable_all(void);
void intel_ds_init(void);
+void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
+
void intel_pmu_lbr_reset(void);
void intel_pmu_lbr_enable(struct perf_event *event);
void intel_pmu_lbr_disable(struct perf_event *event);
-void intel_pmu_lbr_enable_all(void);
+void intel_pmu_lbr_enable_all(bool pmi);
void intel_pmu_lbr_disable_all(void);
@@ -747,8 +884,18 @@ void intel_pmu_lbr_init_atom(void);
void intel_pmu_lbr_init_snb(void);
+void intel_pmu_lbr_init_hsw(void);
+
int intel_pmu_setup_lbr_filter(struct perf_event *event);
+void intel_pt_interrupt(void);
+
+int intel_bts_interrupt(void);
+
+void intel_bts_enable_local(void);
+
+void intel_bts_disable_local(void);
+
int p4_pmu_init(void);
int p6_pmu_init(void);
@@ -758,6 +905,10 @@ int knc_pmu_init(void);
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
+static inline int is_ht_workaround_enabled(void)
+{
+ return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
+}
#else /* CONFIG_CPU_SUP_INTEL */
static inline void reserve_ds_buffers(void)
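
The new intel_excl_cntrs structure is the heart of the HT erratum workaround: per core, each counter carries a tri-state (UNUSED/SHARED/EXCLUSIVE) as seen from the sibling thread, guarded by a raw spinlock since both hyperthreads update it from scheduling paths. A toy reading of the enum comments — not the scheduler's actual algorithm — showing how the states gate a sibling's use of a counter:

#include <stdbool.h>

enum excl_state { EXCL_UNUSED, EXCL_SHARED, EXCL_EXCLUSIVE };

/* If our thread holds the counter exclusively, the sibling may not
 * touch it; a sibling wanting exclusive use needs it fully unused. */
static bool sibling_can_use(enum excl_state ours, bool sibling_wants_excl)
{
	if (ours == EXCL_EXCLUSIVE)
		return false;
	if (sibling_wants_excl)
		return ours == EXCL_UNUSED;
	return true;
}
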
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 2892631..1cee5d2 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -382,6 +382,7 @@ static int amd_pmu_cpu_prepare(int cpu)
static void amd_pmu_cpu_starting(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
struct amd_nb *nb;
int i, nb_id;
@@ -399,7 +400,7 @@ static void amd_pmu_cpu_starting(int cpu)
continue;
if (nb->nb_id == nb_id) {
- cpuc->kfree_on_online = cpuc->amd_nb;
+ *onln = cpuc->amd_nb;
cpuc->amd_nb = nb;
break;
}
@@ -429,7 +430,8 @@ static void amd_pmu_cpu_dead(int cpu)
}
static struct event_constraint *
-amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
{
/*
* if not NB event or no NB, then no constraints
@@ -537,7 +539,8 @@ static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
static struct event_constraint *
-amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
+amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
unsigned int event_code = amd_get_event_code(hwc);
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index a61f5c6..989d3c2 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -796,7 +796,7 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
* the IBS interrupt vector is handled by perf_ibs_cpu_notifier that
* is using the new offset.
*/
-static int force_ibs_eilvt_setup(void)
+static void force_ibs_eilvt_setup(void)
{
int offset;
int ret;
@@ -811,26 +811,24 @@ static int force_ibs_eilvt_setup(void)
if (offset == APIC_EILVT_NR_MAX) {
printk(KERN_DEBUG "No EILVT entry available\n");
- return -EBUSY;
+ return;
}
ret = setup_ibs_ctl(offset);
if (ret)
goto out;
- if (!ibs_eilvt_valid()) {
- ret = -EFAULT;
+ if (!ibs_eilvt_valid())
goto out;
- }
pr_info("IBS: LVT offset %d assigned\n", offset);
- return 0;
+ return;
out:
preempt_disable();
put_eilvt(offset);
preempt_enable();
- return ret;
+ return;
}
static void ibs_eilvt_setup(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 498b6d9..9da2400 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/watchdog.h>
#include <asm/cpufeature.h>
#include <asm/hardirq.h>
@@ -113,6 +114,12 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+
EVENT_CONSTRAINT_END
};
@@ -131,15 +138,12 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
- /*
- * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
- * siblings; disable these events because they can corrupt unrelated
- * counters.
- */
- INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+
EVENT_CONSTRAINT_END
};
@@ -212,11 +216,26 @@ static struct event_constraint intel_hsw_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
- INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
+ INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
- INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
+ INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
- INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
+
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+
+ EVENT_CONSTRAINT_END
+};
+
+struct event_constraint intel_bdw_event_constraints[] = {
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+ INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
+ INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */
EVENT_CONSTRAINT_END
};
@@ -415,6 +434,202 @@ static __initconst const u64 snb_hw_cache_event_ids
};
+/*
+ * Notes on the events:
+ * - data reads do not include code reads (comparable to earlier tables)
+ * - data counts include speculative execution (except L1 write, dtlb, bpu)
+ * - remote node access includes remote memory, remote cache, remote mmio.
+ * - prefetches are not included in the counts because they are not
+ * reliably counted.
+ */
+
+#define HSW_DEMAND_DATA_RD BIT_ULL(0)
+#define HSW_DEMAND_RFO BIT_ULL(1)
+#define HSW_ANY_RESPONSE BIT_ULL(16)
+#define HSW_SUPPLIER_NONE BIT_ULL(17)
+#define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
+#define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
+#define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
+#define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
+#define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
+ HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
+ HSW_L3_MISS_REMOTE_HOP2P)
+#define HSW_SNOOP_NONE BIT_ULL(31)
+#define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
+#define HSW_SNOOP_MISS BIT_ULL(33)
+#define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
+#define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
+#define HSW_SNOOP_HITM BIT_ULL(36)
+#define HSW_SNOOP_NON_DRAM BIT_ULL(37)
+#define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
+ HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
+ HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
+ HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
+#define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
+#define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
+#define HSW_DEMAND_WRITE HSW_DEMAND_RFO
+#define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
+ HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
+#define HSW_LLC_ACCESS HSW_ANY_RESPONSE
+
+#define BDW_L3_MISS_LOCAL BIT(26)
+#define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
+ HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
+ HSW_L3_MISS_REMOTE_HOP2P)
+
+
+static __initconst const u64 hsw_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
+ [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
+ [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
+ [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
+ [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
+ [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+};
+
+static __initconst const u64 hsw_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
+ HSW_LLC_ACCESS,
+ [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
+ HSW_L3_MISS|HSW_ANY_SNOOP,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
+ HSW_LLC_ACCESS,
+ [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
+ HSW_L3_MISS|HSW_ANY_SNOOP,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
+ HSW_L3_MISS_LOCAL_DRAM|
+ HSW_SNOOP_DRAM,
+ [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
+ HSW_L3_MISS_REMOTE|
+ HSW_SNOOP_DRAM,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
+ HSW_L3_MISS_LOCAL_DRAM|
+ HSW_SNOOP_DRAM,
+ [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
+ HSW_L3_MISS_REMOTE|
+ HSW_SNOOP_DRAM,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+};
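
As a hedged illustration (not part of the patch itself), the extra-register values in the table above are plain ORs of a request mask, an L3-result mask and a snoop mask; the helper name below is made up:

	/* illustrative only: the LL OP_READ/RESULT_MISS encoding above */
	static u64 hsw_llc_read_miss_rsp(void)
	{
		/* demand data read that missed L3, with any snoop outcome */
		return HSW_DEMAND_READ | HSW_L3_MISS | HSW_ANY_SNOOP;
	}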
+
static __initconst const u64 westmere_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -1029,21 +1244,10 @@ static __initconst const u64 slm_hw_cache_event_ids
},
};
-static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
-{
- /* user explicitly requested branch sampling */
- if (has_branch_stack(event))
- return true;
-
- /* implicit branch sampling to correct PEBS skid */
- if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
- x86_pmu.intel_cap.pebs_format < 2)
- return true;
-
- return false;
-}
-
-static void intel_pmu_disable_all(void)
+/*
+ * Use from PMIs where the LBRs are already disabled.
+ */
+static void __intel_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1051,17 +1255,24 @@ static void intel_pmu_disable_all(void)
if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();
+ else
+ intel_bts_disable_local();
intel_pmu_pebs_disable_all();
+}
+
+static void intel_pmu_disable_all(void)
+{
+ __intel_pmu_disable_all();
intel_pmu_lbr_disable_all();
}
-static void intel_pmu_enable_all(int added)
+static void __intel_pmu_enable_all(int added, bool pmi)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
intel_pmu_pebs_enable_all();
- intel_pmu_lbr_enable_all();
+ intel_pmu_lbr_enable_all(pmi);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
@@ -1073,7 +1284,13 @@ static void intel_pmu_enable_all(int added)
return;
intel_pmu_enable_bts(event->hw.config);
- }
+ } else
+ intel_bts_enable_local();
+}
+
+static void intel_pmu_enable_all(int added)
+{
+ __intel_pmu_enable_all(added, false);
}
/*
@@ -1207,7 +1424,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 * must be disabled before any actual event
* because any event may be combined with LBR
*/
- if (intel_pmu_needs_lbr_smpl(event))
+ if (needs_branch_stack(event))
intel_pmu_lbr_disable(event);
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -1268,7 +1485,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
* must enabled before any actual event
* because any event may be combined with LBR
*/
- if (intel_pmu_needs_lbr_smpl(event))
+ if (needs_branch_stack(event))
intel_pmu_lbr_enable(event);
if (event->attr.exclude_host)
@@ -1334,6 +1551,18 @@ static void intel_pmu_reset(void)
if (ds)
ds->bts_index = ds->bts_buffer_base;
+ /* Ack all overflows and disable fixed counters */
+ if (x86_pmu.version >= 2) {
+ intel_pmu_ack_status(intel_pmu_get_status());
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ }
+
+ /* Reset LBRs and LBR freezing */
+ if (x86_pmu.lbr_nr) {
+ update_debugctlmsr(get_debugctlmsr() &
+ ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
+ }
+
local_irq_restore(flags);
}
@@ -1357,8 +1586,9 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
*/
if (!x86_pmu.late_ack)
apic_write(APIC_LVTPC, APIC_DM_NMI);
- intel_pmu_disable_all();
+ __intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
+ handled += intel_bts_interrupt();
status = intel_pmu_get_status();
if (!status)
goto done;
@@ -1399,6 +1629,14 @@ again:
}
/*
+ * Intel PT
+ */
+ if (__test_and_clear_bit(55, (unsigned long *)&status)) {
+ handled++;
+ intel_pt_interrupt();
+ }
+
+ /*
* Checkpointed counters can lead to 'spurious' PMIs because the
* rollback caused by the PMI will have cleared the overflow status
* bit. Therefore always force probe these counters.
@@ -1433,7 +1671,7 @@ again:
goto again;
done:
- intel_pmu_enable_all(0);
+ __intel_pmu_enable_all(0, true);
/*
* Only unmask the NMI after the overflow counters
* have been reset. This avoids spurious NMIs on
@@ -1464,7 +1702,7 @@ intel_bts_constraints(struct perf_event *event)
static int intel_alt_er(int idx)
{
- if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
+ if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
return idx;
if (idx == EXTRA_REG_RSP_0)
@@ -1624,7 +1862,8 @@ intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
}
struct event_constraint *
-x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
{
struct event_constraint *c;
@@ -1641,7 +1880,8 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
}
static struct event_constraint *
-intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
{
struct event_constraint *c;
@@ -1649,15 +1889,286 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
if (c)
return c;
- c = intel_pebs_constraints(event);
+ c = intel_shared_regs_constraints(cpuc, event);
if (c)
return c;
- c = intel_shared_regs_constraints(cpuc, event);
+ c = intel_pebs_constraints(event);
if (c)
return c;
- return x86_get_event_constraints(cpuc, event);
+ return x86_get_event_constraints(cpuc, idx, event);
+}
+
+static void
+intel_start_scheduling(struct cpu_hw_events *cpuc)
+{
+ struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+ struct intel_excl_states *xl, *xlo;
+ int tid = cpuc->excl_thread_id;
+ int o_tid = 1 - tid; /* sibling thread */
+
+ /*
+ * nothing needed if in group validation mode
+ */
+ if (cpuc->is_fake || !is_ht_workaround_enabled())
+ return;
+
+ /*
+ * no exclusion needed
+ */
+ if (!excl_cntrs)
+ return;
+
+ xlo = &excl_cntrs->states[o_tid];
+ xl = &excl_cntrs->states[tid];
+
+ xl->sched_started = true;
+ xl->num_alloc_cntrs = 0;
+ /*
+	 * lock shared state until we are done scheduling,
+	 * i.e. until intel_stop_scheduling(); this
+	 * makes scheduling appear as a transaction
+ */
+ WARN_ON_ONCE(!irqs_disabled());
+ raw_spin_lock(&excl_cntrs->lock);
+
+ /*
+ * save initial state of sibling thread
+ */
+ memcpy(xlo->init_state, xlo->state, sizeof(xlo->init_state));
+}
+
+static void
+intel_stop_scheduling(struct cpu_hw_events *cpuc)
+{
+ struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+ struct intel_excl_states *xl, *xlo;
+ int tid = cpuc->excl_thread_id;
+ int o_tid = 1 - tid; /* sibling thread */
+
+ /*
+ * nothing needed if in group validation mode
+ */
+ if (cpuc->is_fake || !is_ht_workaround_enabled())
+ return;
+ /*
+ * no exclusion needed
+ */
+ if (!excl_cntrs)
+ return;
+
+ xlo = &excl_cntrs->states[o_tid];
+ xl = &excl_cntrs->states[tid];
+
+ /*
+ * make new sibling thread state visible
+ */
+ memcpy(xlo->state, xlo->init_state, sizeof(xlo->state));
+
+ xl->sched_started = false;
+ /*
+ * release shared state lock (acquired in intel_start_scheduling())
+ */
+ raw_spin_unlock(&excl_cntrs->lock);
+}
+
+static struct event_constraint *
+intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+ int idx, struct event_constraint *c)
+{
+ struct event_constraint *cx;
+ struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+ struct intel_excl_states *xl, *xlo;
+ int is_excl, i;
+ int tid = cpuc->excl_thread_id;
+ int o_tid = 1 - tid; /* alternate */
+
+ /*
+ * validating a group does not require
+ * enforcing cross-thread exclusion
+ */
+ if (cpuc->is_fake || !is_ht_workaround_enabled())
+ return c;
+
+ /*
+ * no exclusion needed
+ */
+ if (!excl_cntrs)
+ return c;
+ /*
+ * event requires exclusive counter access
+ * across HT threads
+ */
+ is_excl = c->flags & PERF_X86_EVENT_EXCL;
+
+ /*
+ * xl = state of current HT
+ * xlo = state of sibling HT
+ */
+ xl = &excl_cntrs->states[tid];
+ xlo = &excl_cntrs->states[o_tid];
+
+ /*
+ * do not allow scheduling of more than max_alloc_cntrs
+ * which is set to half the available generic counters.
+	 * this helps avoid counter starvation of the sibling thread
+	 * by ensuring that no more than half the counters can be in
+	 * exclusive mode. There are no designated counters for the
+	 * limit; any N/2 counters can be used. This helps with
+	 * events with specific counter constraints
+ */
+ if (xl->num_alloc_cntrs++ == xl->max_alloc_cntrs)
+ return &emptyconstraint;
+
+ cx = c;
+
+ /*
+ * because we modify the constraint, we need
+ * to make a copy. Static constraints come
+ * from static const tables.
+ *
+ * only needed when constraint has not yet
+ * been cloned (marked dynamic)
+ */
+ if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+
+ /* sanity check */
+ if (idx < 0)
+ return &emptyconstraint;
+
+ /*
+ * grab pre-allocated constraint entry
+ */
+ cx = &cpuc->constraint_list[idx];
+
+ /*
+ * initialize dynamic constraint
+ * with static constraint
+ */
+ memcpy(cx, c, sizeof(*cx));
+
+ /*
+ * mark constraint as dynamic, so we
+ * can free it later on
+ */
+ cx->flags |= PERF_X86_EVENT_DYNAMIC;
+ }
+
+ /*
+ * From here on, the constraint is dynamic.
+ * Either it was just allocated above, or it
+	 * was allocated during an earlier invocation
+ * of this function
+ */
+
+ /*
+ * Modify static constraint with current dynamic
+ * state of thread
+ *
+ * EXCLUSIVE: sibling counter measuring exclusive event
+ * SHARED : sibling counter measuring non-exclusive event
+ * UNUSED : sibling counter unused
+ */
+ for_each_set_bit(i, cx->idxmsk, X86_PMC_IDX_MAX) {
+ /*
+ * exclusive event in sibling counter
+ * our corresponding counter cannot be used
+ * regardless of our event
+ */
+ if (xl->state[i] == INTEL_EXCL_EXCLUSIVE)
+ __clear_bit(i, cx->idxmsk);
+ /*
+ * if measuring an exclusive event, sibling
+ * measuring non-exclusive, then counter cannot
+ * be used
+ */
+ if (is_excl && xl->state[i] == INTEL_EXCL_SHARED)
+ __clear_bit(i, cx->idxmsk);
+ }
+
+ /*
+ * recompute actual bit weight for scheduling algorithm
+ */
+ cx->weight = hweight64(cx->idxmsk64);
+
+ /*
+ * if we return an empty mask, then switch
+ * back to static empty constraint to avoid
+ * the cost of freeing later on
+ */
+ if (cx->weight == 0)
+ cx = &emptyconstraint;
+
+ return cx;
+}
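
The filtering loop above boils down to a per-counter predicate against the sibling thread's recorded state; a minimal hedged restatement (the function is illustrative, not part of the patch):

	/* illustrative only: may this thread use counter i, given the sibling state? */
	static bool excl_counter_usable(int sibling_state, bool is_excl)
	{
		if (sibling_state == INTEL_EXCL_EXCLUSIVE)
			return false;	/* sibling measures an exclusive event there */
		if (is_excl && sibling_state == INTEL_EXCL_SHARED)
			return false;	/* we are exclusive but the sibling is not */
		return true;		/* counter unused, or both non-exclusive */
	}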
+
+static struct event_constraint *
+intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ struct event_constraint *c1 = event->hw.constraint;
+ struct event_constraint *c2;
+
+ /*
+ * first time only
+ * - static constraint: no change across incremental scheduling calls
+ * - dynamic constraint: handled by intel_get_excl_constraints()
+ */
+ c2 = __intel_get_event_constraints(cpuc, idx, event);
+ if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
+ bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
+ c1->weight = c2->weight;
+ c2 = c1;
+ }
+
+ if (cpuc->excl_cntrs)
+ return intel_get_excl_constraints(cpuc, event, idx, c2);
+
+ return c2;
+}
+
+static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+ struct intel_excl_states *xlo, *xl;
+ unsigned long flags = 0; /* keep compiler happy */
+ int tid = cpuc->excl_thread_id;
+ int o_tid = 1 - tid;
+
+ /*
+ * nothing needed if in group validation mode
+ */
+ if (cpuc->is_fake)
+ return;
+
+ WARN_ON_ONCE(!excl_cntrs);
+
+ if (!excl_cntrs)
+ return;
+
+ xl = &excl_cntrs->states[tid];
+ xlo = &excl_cntrs->states[o_tid];
+
+ /*
+	 * put_constraint may be called from x86_schedule_events(),
+	 * which already holds the lock, so make the locking
+	 * conditional here
+ */
+ if (!xl->sched_started)
+ raw_spin_lock_irqsave(&excl_cntrs->lock, flags);
+
+ /*
+ * if event was actually assigned, then mark the
+ * counter state as unused now
+ */
+ if (hwc->idx >= 0)
+ xlo->state[hwc->idx] = INTEL_EXCL_UNUSED;
+
+ if (!xl->sched_started)
+ raw_spin_unlock_irqrestore(&excl_cntrs->lock, flags);
}
static void
@@ -1678,7 +2189,57 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
+ struct event_constraint *c = event->hw.constraint;
+
intel_put_shared_regs_event_constraints(cpuc, event);
+
+ /*
+	 * if the PMU has exclusive counter restrictions, then
+	 * all events are subject to them and must call the
+	 * put_excl_constraints() routine
+ */
+ if (c && cpuc->excl_cntrs)
+ intel_put_excl_constraints(cpuc, event);
+
+ /* cleanup dynamic constraint */
+ if (c && (c->flags & PERF_X86_EVENT_DYNAMIC))
+ event->hw.constraint = NULL;
+}
+
+static void intel_commit_scheduling(struct cpu_hw_events *cpuc,
+ struct perf_event *event, int cntr)
+{
+ struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+ struct event_constraint *c = event->hw.constraint;
+ struct intel_excl_states *xlo, *xl;
+ int tid = cpuc->excl_thread_id;
+ int o_tid = 1 - tid;
+ int is_excl;
+
+ if (cpuc->is_fake || !c)
+ return;
+
+ is_excl = c->flags & PERF_X86_EVENT_EXCL;
+
+ if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
+ return;
+
+ WARN_ON_ONCE(!excl_cntrs);
+
+ if (!excl_cntrs)
+ return;
+
+ xl = &excl_cntrs->states[tid];
+ xlo = &excl_cntrs->states[o_tid];
+
+ WARN_ON_ONCE(!raw_spin_is_locked(&excl_cntrs->lock));
+
+ if (cntr >= 0) {
+ if (is_excl)
+ xlo->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
+ else
+ xlo->init_state[cntr] = INTEL_EXCL_SHARED;
+ }
}
static void intel_pebs_aliases_core2(struct perf_event *event)
@@ -1747,10 +2308,21 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (event->attr.precise_ip && x86_pmu.pebs_aliases)
x86_pmu.pebs_aliases(event);
- if (intel_pmu_needs_lbr_smpl(event)) {
+ if (needs_branch_stack(event)) {
ret = intel_pmu_setup_lbr_filter(event);
if (ret)
return ret;
+
+ /*
+ * BTS is set up earlier in this path, so don't account twice
+ */
+ if (!intel_pmu_has_bts(event)) {
+ /* disallow lbr if conflicting events are present */
+ if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+ return -EBUSY;
+
+ event->destroy = hw_perf_lbr_event_destroy;
+ }
}
if (event->attr.type != PERF_TYPE_RAW)
@@ -1891,9 +2463,12 @@ static struct event_constraint counter2_constraint =
EVENT_CONSTRAINT(0, 0x4, 0);
static struct event_constraint *
-hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
{
- struct event_constraint *c = intel_get_event_constraints(cpuc, event);
+ struct event_constraint *c;
+
+ c = intel_get_event_constraints(cpuc, idx, event);
/* Handle special quirk on in_tx_checkpointed only in counter 2 */
if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
@@ -1905,6 +2480,32 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
return c;
}
+/*
+ * Broadwell:
+ *
+ * The INST_RETIRED.ALL period always needs to have the lowest 6 bits cleared
+ * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
+ * the two to enforce a minimum period of 128 (the smallest value that has bits
+ * 0-5 cleared and >= 100).
+ *
+ * Because of how the code in x86_perf_event_set_period() works, the truncation
+ * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
+ * to make up for the 'lost' events due to carrying the 'error' in period_left.
+ *
+ * Therefore the effective (average) period matches the requested period,
+ * despite coarser hardware granularity.
+ */
+static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
+{
+ if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
+ X86_CONFIG(.event=0xc0, .umask=0x01)) {
+ if (left < 128)
+ left = 128;
+ left &= ~0x3fu;
+ }
+ return left;
+}
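
A quick worked check of the clamp (values illustrative): a requested period of 100 is raised to 128, while a requested 200 becomes 200 & ~0x3fu == 192; the 8-event shortfall is carried in period_left and folded into a later period, so the average period still matches the request.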
+
PMU_FORMAT_ATTR(event, "config:0-7" );
PMU_FORMAT_ATTR(umask, "config:8-15" );
PMU_FORMAT_ATTR(edge, "config:18" );
@@ -1979,16 +2580,52 @@ struct intel_shared_regs *allocate_shared_regs(int cpu)
return regs;
}
+static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
+{
+ struct intel_excl_cntrs *c;
+ int i;
+
+ c = kzalloc_node(sizeof(struct intel_excl_cntrs),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (c) {
+ raw_spin_lock_init(&c->lock);
+ for (i = 0; i < X86_PMC_IDX_MAX; i++) {
+ c->states[0].state[i] = INTEL_EXCL_UNUSED;
+ c->states[0].init_state[i] = INTEL_EXCL_UNUSED;
+
+ c->states[1].state[i] = INTEL_EXCL_UNUSED;
+ c->states[1].init_state[i] = INTEL_EXCL_UNUSED;
+ }
+ c->core_id = -1;
+ }
+ return c;
+}
+
static int intel_pmu_cpu_prepare(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
- return NOTIFY_OK;
+ if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
+ cpuc->shared_regs = allocate_shared_regs(cpu);
+ if (!cpuc->shared_regs)
+ return NOTIFY_BAD;
+ }
- cpuc->shared_regs = allocate_shared_regs(cpu);
- if (!cpuc->shared_regs)
- return NOTIFY_BAD;
+ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+ size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
+
+ cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
+ if (!cpuc->constraint_list)
+ return NOTIFY_BAD;
+
+ cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
+ if (!cpuc->excl_cntrs) {
+ kfree(cpuc->constraint_list);
+ kfree(cpuc->shared_regs);
+ return NOTIFY_BAD;
+ }
+ cpuc->excl_thread_id = 0;
+ }
return NOTIFY_OK;
}
@@ -2010,13 +2647,15 @@ static void intel_pmu_cpu_starting(int cpu)
if (!cpuc->shared_regs)
return;
- if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
+ if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
+ void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
+
for_each_cpu(i, topology_thread_cpumask(cpu)) {
struct intel_shared_regs *pc;
pc = per_cpu(cpu_hw_events, i).shared_regs;
if (pc && pc->core_id == core_id) {
- cpuc->kfree_on_online = cpuc->shared_regs;
+ *onln = cpuc->shared_regs;
cpuc->shared_regs = pc;
break;
}
@@ -2027,6 +2666,44 @@ static void intel_pmu_cpu_starting(int cpu)
if (x86_pmu.lbr_sel_map)
cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
+
+ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+ int h = x86_pmu.num_counters >> 1;
+
+ for_each_cpu(i, topology_thread_cpumask(cpu)) {
+ struct intel_excl_cntrs *c;
+
+ c = per_cpu(cpu_hw_events, i).excl_cntrs;
+ if (c && c->core_id == core_id) {
+ cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
+ cpuc->excl_cntrs = c;
+ cpuc->excl_thread_id = 1;
+ break;
+ }
+ }
+ cpuc->excl_cntrs->core_id = core_id;
+ cpuc->excl_cntrs->refcnt++;
+ /*
+ * set hard limit to half the number of generic counters
+ */
+ cpuc->excl_cntrs->states[0].max_alloc_cntrs = h;
+ cpuc->excl_cntrs->states[1].max_alloc_cntrs = h;
+ }
+}
+
+static void free_excl_cntrs(int cpu)
+{
+ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ struct intel_excl_cntrs *c;
+
+ c = cpuc->excl_cntrs;
+ if (c) {
+ if (c->core_id == -1 || --c->refcnt == 0)
+ kfree(c);
+ cpuc->excl_cntrs = NULL;
+ kfree(cpuc->constraint_list);
+ cpuc->constraint_list = NULL;
+ }
}
static void intel_pmu_cpu_dying(int cpu)
@@ -2041,19 +2718,9 @@ static void intel_pmu_cpu_dying(int cpu)
cpuc->shared_regs = NULL;
}
- fini_debug_store_on_cpu(cpu);
-}
+ free_excl_cntrs(cpu);
-static void intel_pmu_flush_branch_stack(void)
-{
- /*
- * Intel LBR does not tag entries with the
- * PID of the current task, then we need to
- * flush it on ctxsw
- * For now, we simply reset it
- */
- if (x86_pmu.lbr_nr)
- intel_pmu_lbr_reset();
+ fini_debug_store_on_cpu(cpu);
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
@@ -2107,7 +2774,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
.guest_get_msrs = intel_guest_get_msrs,
- .flush_branch_stack = intel_pmu_flush_branch_stack,
+ .sched_task = intel_pmu_lbr_sched_task,
};
static __init void intel_clovertown_quirk(void)
@@ -2264,6 +2931,27 @@ static __init void intel_nehalem_quirk(void)
}
}
+/*
+ * enable software workaround for errata:
+ * SNB: BJ122
+ * IVB: BV98
+ * HSW: HSD29
+ *
+ * This is only needed when HT is enabled. However, detecting
+ * whether HT is enabled is difficult (it is model specific), so
+ * instead we enable the workaround at early boot and verify whether
+ * it is needed in a later initcall phase, once we have valid
+ * topology information to check if HT is actually enabled.
+ */
+static __init void intel_ht_bug(void)
+{
+ x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
+
+ x86_pmu.commit_scheduling = intel_commit_scheduling;
+ x86_pmu.start_scheduling = intel_start_scheduling;
+ x86_pmu.stop_scheduling = intel_stop_scheduling;
+}
+
EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
@@ -2443,7 +3131,7 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_slm_event_constraints;
x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
x86_pmu.extra_regs = intel_slm_extra_regs;
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
pr_cont("Silvermont events, ");
break;
@@ -2461,7 +3149,7 @@ __init int intel_pmu_init(void)
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
x86_pmu.extra_regs = intel_westmere_extra_regs;
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.cpu_events = nhm_events_attrs;
@@ -2478,6 +3166,7 @@ __init int intel_pmu_init(void)
case 42: /* 32nm SandyBridge */
case 45: /* 32nm SandyBridge-E/EN/EP */
x86_add_quirk(intel_sandybridge_quirk);
+ x86_add_quirk(intel_ht_bug);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
@@ -2492,9 +3181,11 @@ __init int intel_pmu_init(void)
x86_pmu.extra_regs = intel_snbep_extra_regs;
else
x86_pmu.extra_regs = intel_snb_extra_regs;
+
+
/* all extra regs are per-cpu when HT is on */
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
- x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu.cpu_events = snb_events_attrs;
@@ -2510,6 +3201,7 @@ __init int intel_pmu_init(void)
case 58: /* 22nm IvyBridge */
case 62: /* 22nm IvyBridge-EP/EX */
+ x86_add_quirk(intel_ht_bug);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
/* dTLB-load-misses on IVB is different than SNB */
@@ -2528,8 +3220,8 @@ __init int intel_pmu_init(void)
else
x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
- x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu.cpu_events = snb_events_attrs;
@@ -2545,19 +3237,20 @@ __init int intel_pmu_init(void)
case 63: /* 22nm Haswell Server */
case 69: /* 22nm Haswell ULT */
case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+ x86_add_quirk(intel_ht_bug);
x86_pmu.late_ack = true;
- memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
- memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+ memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
- intel_pmu_lbr_init_snb();
+ intel_pmu_lbr_init_hsw();
x86_pmu.event_constraints = intel_hsw_event_constraints;
x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
x86_pmu.extra_regs = intel_snbep_extra_regs;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
/* all extra regs are per-cpu when HT is on */
- x86_pmu.er_flags |= ERF_HAS_RSP_1;
- x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
@@ -2566,6 +3259,39 @@ __init int intel_pmu_init(void)
pr_cont("Haswell events, ");
break;
+ case 61: /* 14nm Broadwell Core-M */
+ case 86: /* 14nm Broadwell Xeon D */
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+
+ /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
+ hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
+ BDW_L3_MISS|HSW_SNOOP_DRAM;
+ hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
+ HSW_SNOOP_DRAM;
+ hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
+ BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
+ hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
+ BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
+
+ intel_pmu_lbr_init_snb();
+
+ x86_pmu.event_constraints = intel_bdw_event_constraints;
+ x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_snbep_extra_regs;
+ x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
+ /* all extra regs are per-cpu when HT is on */
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
+
+ x86_pmu.hw_config = hsw_hw_config;
+ x86_pmu.get_event_constraints = hsw_get_event_constraints;
+ x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.limit_period = bdw_limit_period;
+ pr_cont("Broadwell events, ");
+ break;
+
default:
switch (x86_pmu.version) {
case 1:
@@ -2651,3 +3377,47 @@ __init int intel_pmu_init(void)
return 0;
}
+
+/*
+ * HT bug: phase 2 init
+ * Called once we have valid topology information to check
+ * whether or not HT is enabled
+ * If HT is off, then we disable the workaround
+ */
+static __init int fixup_ht_bug(void)
+{
+ int cpu = smp_processor_id();
+ int w, c;
+ /*
+ * problem not present on this CPU model, nothing to do
+ */
+ if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
+ return 0;
+
+ w = cpumask_weight(topology_thread_cpumask(cpu));
+ if (w > 1) {
+ pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
+ return 0;
+ }
+
+ watchdog_nmi_disable_all();
+
+ x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
+
+ x86_pmu.commit_scheduling = NULL;
+ x86_pmu.start_scheduling = NULL;
+ x86_pmu.stop_scheduling = NULL;
+
+ watchdog_nmi_enable_all();
+
+ get_online_cpus();
+
+ for_each_online_cpu(c) {
+ free_excl_cntrs(c);
+ }
+
+ put_online_cpus();
+ pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
+ return 0;
+}
+subsys_initcall(fixup_ht_bug);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
new file mode 100644
index 0000000..ac1f0c5
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
@@ -0,0 +1,525 @@
+/*
+ * BTS PMU driver for perf
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#undef DEBUG
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/coredump.h>
+
+#include <asm-generic/sizes.h>
+#include <asm/perf_event.h>
+
+#include "perf_event.h"
+
+struct bts_ctx {
+ struct perf_output_handle handle;
+ struct debug_store ds_back;
+ int started;
+};
+
+static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
+
+#define BTS_RECORD_SIZE 24
+#define BTS_SAFETY_MARGIN 4080
+
+struct bts_phys {
+ struct page *page;
+ unsigned long size;
+ unsigned long offset;
+ unsigned long displacement;
+};
+
+struct bts_buffer {
+ size_t real_size; /* multiple of BTS_RECORD_SIZE */
+ unsigned int nr_pages;
+ unsigned int nr_bufs;
+ unsigned int cur_buf;
+ bool snapshot;
+ local_t data_size;
+ local_t lost;
+ local_t head;
+ unsigned long end;
+ void **data_pages;
+ struct bts_phys buf[0];
+};
+
+struct pmu bts_pmu;
+
+void intel_pmu_enable_bts(u64 config);
+void intel_pmu_disable_bts(void);
+
+static size_t buf_size(struct page *page)
+{
+ return 1 << (PAGE_SHIFT + page_private(page));
+}
+
+static void *
+bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
+{
+ struct bts_buffer *buf;
+ struct page *page;
+ int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
+ unsigned long offset;
+ size_t size = nr_pages << PAGE_SHIFT;
+ int pg, nbuf, pad;
+
+ /* count all the high order buffers */
+ for (pg = 0, nbuf = 0; pg < nr_pages;) {
+ page = virt_to_page(pages[pg]);
+ if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
+ return NULL;
+ pg += 1 << page_private(page);
+ nbuf++;
+ }
+
+ /*
+	 * to avoid interrupts in overwrite mode, only allow one physical buffer
+ */
+ if (overwrite && nbuf > 1)
+ return NULL;
+
+ buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
+ if (!buf)
+ return NULL;
+
+ buf->nr_pages = nr_pages;
+ buf->nr_bufs = nbuf;
+ buf->snapshot = overwrite;
+ buf->data_pages = pages;
+ buf->real_size = size - size % BTS_RECORD_SIZE;
+
+ for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
+ unsigned int __nr_pages;
+
+ page = virt_to_page(pages[pg]);
+ __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
+ buf->buf[nbuf].page = page;
+ buf->buf[nbuf].offset = offset;
+ buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
+ buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
+ pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
+ buf->buf[nbuf].size -= pad;
+
+ pg += __nr_pages;
+ offset += __nr_pages << PAGE_SHIFT;
+ }
+
+ return buf;
+}
+
+static void bts_buffer_free_aux(void *data)
+{
+ kfree(data);
+}
+
+static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
+{
+ return buf->buf[idx].offset + buf->buf[idx].displacement;
+}
+
+static void
+bts_config_buffer(struct bts_buffer *buf)
+{
+ int cpu = raw_smp_processor_id();
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ struct bts_phys *phys = &buf->buf[buf->cur_buf];
+ unsigned long index, thresh = 0, end = phys->size;
+ struct page *page = phys->page;
+
+ index = local_read(&buf->head);
+
+ if (!buf->snapshot) {
+ if (buf->end < phys->offset + buf_size(page))
+ end = buf->end - phys->offset - phys->displacement;
+
+ index -= phys->offset + phys->displacement;
+
+ if (end - index > BTS_SAFETY_MARGIN)
+ thresh = end - BTS_SAFETY_MARGIN;
+ else if (end - index > BTS_RECORD_SIZE)
+ thresh = end - BTS_RECORD_SIZE;
+ else
+ thresh = end;
+ }
+
+ ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
+ ds->bts_index = ds->bts_buffer_base + index;
+ ds->bts_absolute_maximum = ds->bts_buffer_base + end;
+ ds->bts_interrupt_threshold = !buf->snapshot
+ ? ds->bts_buffer_base + thresh
+ : ds->bts_absolute_maximum + BTS_RECORD_SIZE;
+}
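
Reading the threshold selection above with illustrative numbers: with end == 8192 and index == 0 the interrupt threshold lands BTS_SAFETY_MARGIN bytes short of the end (8192 - 4080 == 4112); with only 100 bytes of space left it falls back to one record short of the end (end - 24); with less than one record left it sits at end itself.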
+
+static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head)
+{
+ unsigned long index = head - phys->offset;
+
+ memset(page_address(phys->page) + index, 0, phys->size - index);
+}
+
+static bool bts_buffer_is_full(struct bts_buffer *buf, struct bts_ctx *bts)
+{
+ if (buf->snapshot)
+ return false;
+
+ if (local_read(&buf->data_size) >= bts->handle.size ||
+ bts->handle.size - local_read(&buf->data_size) < BTS_RECORD_SIZE)
+ return true;
+
+ return false;
+}
+
+static void bts_update(struct bts_ctx *bts)
+{
+ int cpu = raw_smp_processor_id();
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ struct bts_buffer *buf = perf_get_aux(&bts->handle);
+ unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;
+
+ if (!buf)
+ return;
+
+ head = index + bts_buffer_offset(buf, buf->cur_buf);
+ old = local_xchg(&buf->head, head);
+
+ if (!buf->snapshot) {
+ if (old == head)
+ return;
+
+ if (ds->bts_index >= ds->bts_absolute_maximum)
+ local_inc(&buf->lost);
+
+ /*
+ * old and head are always in the same physical buffer, so we
+ * can subtract them to get the data size.
+ */
+ local_add(head - old, &buf->data_size);
+ } else {
+ local_set(&buf->data_size, head);
+ }
+}
+
+static void __bts_event_start(struct perf_event *event)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct bts_buffer *buf = perf_get_aux(&bts->handle);
+ u64 config = 0;
+
+ if (!buf || bts_buffer_is_full(buf, bts))
+ return;
+
+ event->hw.state = 0;
+
+ if (!buf->snapshot)
+ config |= ARCH_PERFMON_EVENTSEL_INT;
+ if (!event->attr.exclude_kernel)
+ config |= ARCH_PERFMON_EVENTSEL_OS;
+ if (!event->attr.exclude_user)
+ config |= ARCH_PERFMON_EVENTSEL_USR;
+
+ bts_config_buffer(buf);
+
+ /*
+ * local barrier to make sure that ds configuration made it
+ * before we enable BTS
+ */
+ wmb();
+
+ intel_pmu_enable_bts(config);
+}
+
+static void bts_event_start(struct perf_event *event, int flags)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+ __bts_event_start(event);
+
+ /* PMI handler: this counter is running and likely generating PMIs */
+ ACCESS_ONCE(bts->started) = 1;
+}
+
+static void __bts_event_stop(struct perf_event *event)
+{
+ /*
+ * No extra synchronization is mandated by the documentation to have
+ * BTS data stores globally visible.
+ */
+ intel_pmu_disable_bts();
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
+ ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
+}
+
+static void bts_event_stop(struct perf_event *event, int flags)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+ /* PMI handler: don't restart this counter */
+ ACCESS_ONCE(bts->started) = 0;
+
+ __bts_event_stop(event);
+
+ if (flags & PERF_EF_UPDATE)
+ bts_update(bts);
+}
+
+void intel_bts_enable_local(void)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+ if (bts->handle.event && bts->started)
+ __bts_event_start(bts->handle.event);
+}
+
+void intel_bts_disable_local(void)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+ if (bts->handle.event)
+ __bts_event_stop(bts->handle.event);
+}
+
+static int
+bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
+{
+ unsigned long head, space, next_space, pad, gap, skip, wakeup;
+ unsigned int next_buf;
+ struct bts_phys *phys, *next_phys;
+ int ret;
+
+ if (buf->snapshot)
+ return 0;
+
+ head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+ if (WARN_ON_ONCE(head != local_read(&buf->head)))
+ return -EINVAL;
+
+ phys = &buf->buf[buf->cur_buf];
+ space = phys->offset + phys->displacement + phys->size - head;
+ pad = space;
+ if (space > handle->size) {
+ space = handle->size;
+ space -= space % BTS_RECORD_SIZE;
+ }
+ if (space <= BTS_SAFETY_MARGIN) {
+ /* See if next phys buffer has more space */
+ next_buf = buf->cur_buf + 1;
+ if (next_buf >= buf->nr_bufs)
+ next_buf = 0;
+ next_phys = &buf->buf[next_buf];
+ gap = buf_size(phys->page) - phys->displacement - phys->size +
+ next_phys->displacement;
+ skip = pad + gap;
+ if (handle->size >= skip) {
+ next_space = next_phys->size;
+ if (next_space + skip > handle->size) {
+ next_space = handle->size - skip;
+ next_space -= next_space % BTS_RECORD_SIZE;
+ }
+ if (next_space > space || !space) {
+ if (pad)
+ bts_buffer_pad_out(phys, head);
+ ret = perf_aux_output_skip(handle, skip);
+ if (ret)
+ return ret;
+ /* Advance to next phys buffer */
+ phys = next_phys;
+ space = next_space;
+ head = phys->offset + phys->displacement;
+ /*
+ * After this, cur_buf and head won't match ds
+ * anymore, so we must not be racing with
+ * bts_update().
+ */
+ buf->cur_buf = next_buf;
+ local_set(&buf->head, head);
+ }
+ }
+ }
+
+ /* Don't go far beyond wakeup watermark */
+ wakeup = BTS_SAFETY_MARGIN + BTS_RECORD_SIZE + handle->wakeup -
+ handle->head;
+ if (space > wakeup) {
+ space = wakeup;
+ space -= space % BTS_RECORD_SIZE;
+ }
+
+ buf->end = head + space;
+
+ /*
+ * If we have no space, the lost notification would have been sent when
+ * we hit absolute_maximum - see bts_update()
+ */
+ if (!space)
+ return -ENOSPC;
+
+ return 0;
+}
+
+int intel_bts_interrupt(void)
+{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct perf_event *event = bts->handle.event;
+ struct bts_buffer *buf;
+ s64 old_head;
+ int err;
+
+ if (!event || !bts->started)
+ return 0;
+
+ buf = perf_get_aux(&bts->handle);
+ /*
+ * Skip snapshot counters: they don't use the interrupt, but
+ * there's no other way of telling, because the pointer will
+ * keep moving
+ */
+ if (!buf || buf->snapshot)
+ return 0;
+
+ old_head = local_read(&buf->head);
+ bts_update(bts);
+
+ /* no new data */
+ if (old_head == local_read(&buf->head))
+ return 0;
+
+ perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
+ !!local_xchg(&buf->lost, 0));
+
+ buf = perf_aux_output_begin(&bts->handle, event);
+ if (!buf)
+ return 1;
+
+ err = bts_buffer_reset(buf, &bts->handle);
+ if (err)
+ perf_aux_output_end(&bts->handle, 0, false);
+
+ return 1;
+}
+
+static void bts_event_del(struct perf_event *event, int mode)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct bts_buffer *buf = perf_get_aux(&bts->handle);
+
+ bts_event_stop(event, PERF_EF_UPDATE);
+
+ if (buf) {
+ if (buf->snapshot)
+ bts->handle.head =
+ local_xchg(&buf->data_size,
+ buf->nr_pages << PAGE_SHIFT);
+ perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
+ !!local_xchg(&buf->lost, 0));
+ }
+
+ cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
+ cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
+ cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
+ cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
+}
+
+static int bts_event_add(struct perf_event *event, int mode)
+{
+ struct bts_buffer *buf;
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int ret = -EBUSY;
+
+ event->hw.state = PERF_HES_STOPPED;
+
+ if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+ return -EBUSY;
+
+ if (bts->handle.event)
+ return -EBUSY;
+
+ buf = perf_aux_output_begin(&bts->handle, event);
+ if (!buf)
+ return -EINVAL;
+
+ ret = bts_buffer_reset(buf, &bts->handle);
+ if (ret) {
+ perf_aux_output_end(&bts->handle, 0, false);
+ return ret;
+ }
+
+ bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
+ bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
+ bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;
+
+ if (mode & PERF_EF_START) {
+ bts_event_start(event, 0);
+ if (hwc->state & PERF_HES_STOPPED) {
+ bts_event_del(event, 0);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void bts_event_destroy(struct perf_event *event)
+{
+ x86_del_exclusive(x86_lbr_exclusive_bts);
+}
+
+static int bts_event_init(struct perf_event *event)
+{
+ if (event->attr.type != bts_pmu.type)
+ return -ENOENT;
+
+ if (x86_add_exclusive(x86_lbr_exclusive_bts))
+ return -EBUSY;
+
+ event->destroy = bts_event_destroy;
+
+ return 0;
+}
+
+static void bts_event_read(struct perf_event *event)
+{
+}
+
+static __init int bts_init(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
+ return -ENODEV;
+
+ bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE;
+ bts_pmu.task_ctx_nr = perf_sw_context;
+ bts_pmu.event_init = bts_event_init;
+ bts_pmu.add = bts_event_add;
+ bts_pmu.del = bts_event_del;
+ bts_pmu.start = bts_event_start;
+ bts_pmu.stop = bts_event_stop;
+ bts_pmu.read = bts_event_read;
+ bts_pmu.setup_aux = bts_buffer_setup_aux;
+ bts_pmu.free_aux = bts_buffer_free_aux;
+
+ return perf_pmu_register(&bts_pmu, "intel_bts", -1);
+}
+
+module_init(bts_init);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
new file mode 100644
index 0000000..e4d1b8b
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -0,0 +1,1379 @@
+/*
+ * Intel Cache Quality-of-Service Monitoring (CQM) support.
+ *
+ * Based very, very heavily on work by Peter Zijlstra.
+ */
+
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <asm/cpu_device_id.h>
+#include "perf_event.h"
+
+#define MSR_IA32_PQR_ASSOC 0x0c8f
+#define MSR_IA32_QM_CTR 0x0c8e
+#define MSR_IA32_QM_EVTSEL 0x0c8d
+
+static unsigned int cqm_max_rmid = -1;
+static unsigned int cqm_l3_scale; /* supposedly cacheline size */
+
+struct intel_cqm_state {
+ raw_spinlock_t lock;
+ int rmid;
+ int cnt;
+};
+
+static DEFINE_PER_CPU(struct intel_cqm_state, cqm_state);
+
+/*
+ * Protects cache_groups, cqm_rmid_free_lru and cqm_rmid_limbo_lru.
+ * Also protects event->hw.cqm_rmid
+ *
+ * Hold either for stability, both for modification of ->hw.cqm_rmid.
+ */
+static DEFINE_MUTEX(cache_mutex);
+static DEFINE_RAW_SPINLOCK(cache_lock);
+
+/*
+ * Groups of events that have the same target(s), one RMID per group.
+ */
+static LIST_HEAD(cache_groups);
+
+/*
+ * Mask of CPUs for reading CQM values. We only need one per-socket.
+ */
+static cpumask_t cqm_cpumask;
+
+#define RMID_VAL_ERROR (1ULL << 63)
+#define RMID_VAL_UNAVAIL (1ULL << 62)
+
+#define QOS_L3_OCCUP_EVENT_ID (1 << 0)
+
+#define QOS_EVENT_MASK QOS_L3_OCCUP_EVENT_ID
+
+/*
+ * This is central to the rotation algorithm in __intel_cqm_rmid_rotate().
+ *
+ * This rmid is always free and is guaranteed to have an associated
+ * near-zero occupancy value, i.e. no cachelines are tagged with this
+ * RMID, once __intel_cqm_rmid_rotate() returns.
+ */
+static unsigned int intel_cqm_rotation_rmid;
+
+#define INVALID_RMID (-1)
+
+/*
+ * Is @rmid valid for programming the hardware?
+ *
+ * rmid 0 is reserved by the hardware for all non-monitored tasks, which
+ * means that we should never come across an rmid with that value.
+ * Likewise, an rmid value of -1 is used to indicate "no rmid currently
+ * assigned" and is used as part of the rotation code.
+ */
+static inline bool __rmid_valid(unsigned int rmid)
+{
+ if (!rmid || rmid == INVALID_RMID)
+ return false;
+
+ return true;
+}
+
+static u64 __rmid_read(unsigned int rmid)
+{
+ u64 val;
+
+ /*
+ * Ignore the SDM, this thing is _NOTHING_ like a regular perfcnt,
+ * it just says that to increase confusion.
+ */
+ wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid);
+ rdmsrl(MSR_IA32_QM_CTR, val);
+
+ /*
+ * Aside from the ERROR and UNAVAIL bits, assume this thing returns
+ * the number of cachelines tagged with @rmid.
+ */
+ return val;
+}
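
Since MSR_IA32_QM_CTR reports occupancy in cqm_l3_scale-byte units, a caller that wants bytes has to scale the reading; a hedged sketch (the helper name is made up, and error bits are treated as 'no data'):

	/* illustrative only: convert a QM_CTR reading to bytes of L3 occupancy */
	static u64 rmid_occupancy_bytes(unsigned int rmid)
	{
		u64 val = __rmid_read(rmid);

		if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
			return 0;	/* reading is not usable */

		return val * cqm_l3_scale;
	}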
+
+enum rmid_recycle_state {
+ RMID_YOUNG = 0,
+ RMID_AVAILABLE,
+ RMID_DIRTY,
+};
+
+struct cqm_rmid_entry {
+ unsigned int rmid;
+ enum rmid_recycle_state state;
+ struct list_head list;
+ unsigned long queue_time;
+};
+
+/*
+ * cqm_rmid_free_lru - A least recently used list of RMIDs.
+ *
+ * Oldest entry at the head, newest (most recently used) entry at the
+ * tail. This list is never traversed, it's only used to keep track of
+ * the lru order. That is, we only pick entries of the head or insert
+ * them on the tail.
+ *
+ * All entries on the list are 'free', and their RMIDs are not currently
+ * in use. To mark an RMID as in use, remove its entry from the lru
+ * list.
+ *
+ *
+ * cqm_rmid_limbo_lru - list of currently unused but (potentially) dirty RMIDs.
+ *
+ * This list contains RMIDs that no one is currently using but that
+ * may have a non-zero occupancy value associated with them. The
+ * rotation worker moves RMIDs from the limbo list to the free list once
+ * the occupancy value drops below __intel_cqm_threshold.
+ *
+ * Both lists are protected by cache_mutex.
+ */
+static LIST_HEAD(cqm_rmid_free_lru);
+static LIST_HEAD(cqm_rmid_limbo_lru);
+
+/*
+ * We use a simple array of pointers so that we can lookup a struct
+ * cqm_rmid_entry in O(1). This alleviates the callers of __get_rmid()
+ * and __put_rmid() from having to worry about dealing with struct
+ * cqm_rmid_entry - they just deal with rmids, i.e. integers.
+ *
+ * Once this array is initialized it is read-only. No locks are required
+ * to access it.
+ *
+ * All entries for all RMIDs can be looked up in this array at all
+ * times.
+ */
+static struct cqm_rmid_entry **cqm_rmid_ptrs;
+
+static inline struct cqm_rmid_entry *__rmid_entry(int rmid)
+{
+ struct cqm_rmid_entry *entry;
+
+ entry = cqm_rmid_ptrs[rmid];
+ WARN_ON(entry->rmid != rmid);
+
+ return entry;
+}
+
+/*
+ * Returns INVALID_RMID (< 0) on failure.
+ *
+ * We expect to be called with cache_mutex held.
+ */
+static int __get_rmid(void)
+{
+ struct cqm_rmid_entry *entry;
+
+ lockdep_assert_held(&cache_mutex);
+
+ if (list_empty(&cqm_rmid_free_lru))
+ return INVALID_RMID;
+
+ entry = list_first_entry(&cqm_rmid_free_lru, struct cqm_rmid_entry, list);
+ list_del(&entry->list);
+
+ return entry->rmid;
+}
+
+static void __put_rmid(unsigned int rmid)
+{
+ struct cqm_rmid_entry *entry;
+
+ lockdep_assert_held(&cache_mutex);
+
+ WARN_ON(!__rmid_valid(rmid));
+ entry = __rmid_entry(rmid);
+
+ entry->queue_time = jiffies;
+ entry->state = RMID_YOUNG;
+
+ list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
+}
+
+static int intel_cqm_setup_rmid_cache(void)
+{
+ struct cqm_rmid_entry *entry;
+ unsigned int nr_rmids;
+ int r = 0;
+
+ nr_rmids = cqm_max_rmid + 1;
+ cqm_rmid_ptrs = kmalloc(sizeof(struct cqm_rmid_entry *) *
+ nr_rmids, GFP_KERNEL);
+ if (!cqm_rmid_ptrs)
+ return -ENOMEM;
+
+ for (; r <= cqm_max_rmid; r++) {
+ struct cqm_rmid_entry *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto fail;
+
+ INIT_LIST_HEAD(&entry->list);
+ entry->rmid = r;
+ cqm_rmid_ptrs[r] = entry;
+
+ list_add_tail(&entry->list, &cqm_rmid_free_lru);
+ }
+
+ /*
+ * RMID 0 is special and is always allocated. It's used for all
+ * tasks that are not monitored.
+ */
+ entry = __rmid_entry(0);
+ list_del(&entry->list);
+
+ mutex_lock(&cache_mutex);
+ intel_cqm_rotation_rmid = __get_rmid();
+ mutex_unlock(&cache_mutex);
+
+ return 0;
+fail:
+ while (r--)
+ kfree(cqm_rmid_ptrs[r]);
+
+ kfree(cqm_rmid_ptrs);
+ return -ENOMEM;
+}
+
+/*
+ * Determine if @a and @b measure the same set of tasks.
+ *
+ * If @a and @b measure the same set of tasks then we want to share a
+ * single RMID.
+ */
+static bool __match_event(struct perf_event *a, struct perf_event *b)
+{
+ /* Per-cpu and task events don't mix */
+ if ((a->attach_state & PERF_ATTACH_TASK) !=
+ (b->attach_state & PERF_ATTACH_TASK))
+ return false;
+
+#ifdef CONFIG_CGROUP_PERF
+ if (a->cgrp != b->cgrp)
+ return false;
+#endif
+
+ /* If not task event, we're machine wide */
+ if (!(b->attach_state & PERF_ATTACH_TASK))
+ return true;
+
+ /*
+ * Events that target same task are placed into the same cache group.
+ */
+ if (a->hw.target == b->hw.target)
+ return true;
+
+ /*
+ * Are we an inherited event?
+ */
+ if (b->parent == a)
+ return true;
+
+ return false;
+}
+
+#ifdef CONFIG_CGROUP_PERF
+static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
+{
+ if (event->attach_state & PERF_ATTACH_TASK)
+ return perf_cgroup_from_task(event->hw.target);
+
+ return event->cgrp;
+}
+#endif
+
+/*
+ * Determine if @a's tasks intersect with @b's tasks
+ *
+ * There are combinations of events that we explicitly prohibit,
+ *
+ * PROHIBITS
+ * system-wide -> cgroup and task
+ * cgroup -> system-wide
+ * -> task in cgroup
+ * task -> system-wide
+ * -> task in cgroup
+ *
+ * Call this function before allocating an RMID.
+ */
+static bool __conflict_event(struct perf_event *a, struct perf_event *b)
+{
+#ifdef CONFIG_CGROUP_PERF
+ /*
+ * We can have any number of cgroups but only one system-wide
+ * event at a time.
+ */
+ if (a->cgrp && b->cgrp) {
+ struct perf_cgroup *ac = a->cgrp;
+ struct perf_cgroup *bc = b->cgrp;
+
+ /*
+ * This condition should have been caught in
+ * __match_event() and we should be sharing an RMID.
+ */
+ WARN_ON_ONCE(ac == bc);
+
+ if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
+ cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
+ return true;
+
+ return false;
+ }
+
+ if (a->cgrp || b->cgrp) {
+ struct perf_cgroup *ac, *bc;
+
+ /*
+ * cgroup and system-wide events are mutually exclusive
+ */
+ if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) ||
+ (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK)))
+ return true;
+
+ /*
+ * Ensure neither event is part of the other's cgroup
+ */
+ ac = event_to_cgroup(a);
+ bc = event_to_cgroup(b);
+ if (ac == bc)
+ return true;
+
+ /*
+ * Must have cgroup and non-intersecting task events.
+ */
+ if (!ac || !bc)
+ return false;
+
+ /*
+ * We have cgroup and task events, and the task belongs
+	 * to a cgroup. Check for overlap.
+ */
+ if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
+ cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
+ return true;
+
+ return false;
+ }
+#endif
+ /*
+ * If one of them is not a task, same story as above with cgroups.
+ */
+ if (!(a->attach_state & PERF_ATTACH_TASK) ||
+ !(b->attach_state & PERF_ATTACH_TASK))
+ return true;
+
+ /*
+ * Must be non-overlapping.
+ */
+ return false;
+}
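
Concretely, reading the clauses above: two plain task events never conflict, a system-wide event conflicts with any task or cgroup event, and two cgroup events conflict only when one cgroup is a descendant of the other.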
+
+struct rmid_read {
+ unsigned int rmid;
+ atomic64_t value;
+};
+
+static void __intel_cqm_event_count(void *info);
+
+/*
+ * Exchange the RMID of a group of events.
+ */
+static unsigned int
+intel_cqm_xchg_rmid(struct perf_event *group, unsigned int rmid)
+{
+ struct perf_event *event;
+ unsigned int old_rmid = group->hw.cqm_rmid;
+ struct list_head *head = &group->hw.cqm_group_entry;
+
+ lockdep_assert_held(&cache_mutex);
+
+ /*
+ * If our RMID is being deallocated, perform a read now.
+ */
+ if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
+ struct rmid_read rr = {
+ .value = ATOMIC64_INIT(0),
+ .rmid = old_rmid,
+ };
+
+ on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
+ &rr, 1);
+ local64_set(&group->count, atomic64_read(&rr.value));
+ }
+
+ raw_spin_lock_irq(&cache_lock);
+
+ group->hw.cqm_rmid = rmid;
+ list_for_each_entry(event, head, hw.cqm_group_entry)
+ event->hw.cqm_rmid = rmid;
+
+ raw_spin_unlock_irq(&cache_lock);
+
+ return old_rmid;
+}
+
+/*
+ * If we fail to assign a new RMID for intel_cqm_rotation_rmid because
+ * cachelines are still tagged with RMIDs in limbo, we progressively
+ * increment the threshold until we find an RMID in limbo with <=
+ * __intel_cqm_threshold lines tagged. This is designed to mitigate the
+ * problem where cachelines tagged with an RMID are not steadily being
+ * evicted.
+ *
+ * On successful rotations we decrease the threshold back towards zero.
+ *
+ * __intel_cqm_max_threshold provides an upper bound on the threshold,
+ * and is measured in bytes because it's exposed to userland.
+ */
+static unsigned int __intel_cqm_threshold;
+static unsigned int __intel_cqm_max_threshold;
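
To make the bytes-versus-lines units concrete (numbers illustrative): with cqm_l3_scale == 64 and __intel_cqm_max_threshold == 16384 bytes, the rotation code caps __intel_cqm_threshold at 16384 / 64 == 256 tagged cachelines, which is exactly the threshold_limit computation in __intel_cqm_rmid_rotate() below.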
+
+/*
+ * Test whether an RMID has a zero occupancy value on this cpu.
+ */
+static void intel_cqm_stable(void *arg)
+{
+ struct cqm_rmid_entry *entry;
+
+ list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
+ if (entry->state != RMID_AVAILABLE)
+ break;
+
+ if (__rmid_read(entry->rmid) > __intel_cqm_threshold)
+ entry->state = RMID_DIRTY;
+ }
+}
+
+/*
+ * If we have group events waiting for an RMID that don't conflict with
+ * events already running, assign @rmid.
+ */
+static bool intel_cqm_sched_in_event(unsigned int rmid)
+{
+ struct perf_event *leader, *event;
+
+ lockdep_assert_held(&cache_mutex);
+
+ leader = list_first_entry(&cache_groups, struct perf_event,
+ hw.cqm_groups_entry);
+ event = leader;
+
+ list_for_each_entry_continue(event, &cache_groups,
+ hw.cqm_groups_entry) {
+ if (__rmid_valid(event->hw.cqm_rmid))
+ continue;
+
+ if (__conflict_event(event, leader))
+ continue;
+
+ intel_cqm_xchg_rmid(event, rmid);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Initially use this constant for both the limbo queue time and the
+ * rotation timer interval, pmu::hrtimer_interval_ms.
+ *
+ * They don't need to be the same, but the two are related since if you
+ * rotate faster than you recycle RMIDs, you may run out of available
+ * RMIDs.
+ */
+#define RMID_DEFAULT_QUEUE_TIME 250 /* ms */
+
+static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME;
+
+/*
+ * intel_cqm_rmid_stabilize - move RMIDs from limbo to free list
+ * @nr_available: number of freeable RMIDs on the limbo list
+ *
+ * Quiescent state; wait for all 'freed' RMIDs to become unused, i.e. no
+ * cachelines are tagged with those RMIDs. After this we can reuse them
+ * and know that the current set of active RMIDs is stable.
+ *
+ * Return %true or %false depending on whether stabilization needs to be
+ * reattempted.
+ *
+ * If we return %true then @nr_available is updated to indicate the
+ * number of RMIDs on the limbo list that have been queued for the
+ * minimum queue time (RMID_AVAILABLE), but whose data occupancy values
+ * are above __intel_cqm_threshold.
+ */
+static bool intel_cqm_rmid_stabilize(unsigned int *available)
+{
+ struct cqm_rmid_entry *entry, *tmp;
+
+ lockdep_assert_held(&cache_mutex);
+
+ *available = 0;
+ list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
+ unsigned long min_queue_time;
+ unsigned long now = jiffies;
+
+ /*
+ * We hold RMIDs placed into limbo for a minimum queue
+ * time. Before the minimum queue time has elapsed we do
+ * not recycle RMIDs.
+ *
+ * The reasoning is that until a sufficient time has
+ * passed since we stopped using an RMID, any RMID
+ * placed onto the limbo list will likely still have
+ * data tagged in the cache, which means we'll probably
+ * fail to recycle it anyway.
+ *
+ * We can save ourselves an expensive IPI by skipping
+ * any RMIDs that have not been queued for the minimum
+ * time.
+ */
+ min_queue_time = entry->queue_time +
+ msecs_to_jiffies(__rmid_queue_time_ms);
+
+ if (time_after(min_queue_time, now))
+ break;
+
+ entry->state = RMID_AVAILABLE;
+ (*available)++;
+ }
+
+ /*
+ * Fast return if none of the RMIDs on the limbo list have been
+ * sitting on the queue for the minimum queue time.
+ */
+ if (!*available)
+ return false;
+
+ /*
+ * Test whether an RMID is free for each package.
+ */
+ on_each_cpu_mask(&cqm_cpumask, intel_cqm_stable, NULL, true);
+
+ list_for_each_entry_safe(entry, tmp, &cqm_rmid_limbo_lru, list) {
+ /*
+ * Exhausted all RMIDs that have waited min queue time.
+ */
+ if (entry->state == RMID_YOUNG)
+ break;
+
+ if (entry->state == RMID_DIRTY)
+ continue;
+
+ list_del(&entry->list); /* remove from limbo */
+
+ /*
+ * The rotation RMID gets priority if it's
+		 * currently invalid, in which case we skip adding
+		 * the RMID to the free lru.
+ */
+ if (!__rmid_valid(intel_cqm_rotation_rmid)) {
+ intel_cqm_rotation_rmid = entry->rmid;
+ continue;
+ }
+
+ /*
+ * If we have groups waiting for RMIDs, hand
+ * them one now provided they don't conflict.
+ */
+ if (intel_cqm_sched_in_event(entry->rmid))
+ continue;
+
+ /*
+ * Otherwise place it onto the free list.
+ */
+ list_add_tail(&entry->list, &cqm_rmid_free_lru);
+ }
+
+
+ return __rmid_valid(intel_cqm_rotation_rmid);
+}
+
+/*
+ * Pick a victim group and move it to the tail of the group list.
+ * @next: The first group without an RMID
+ */
+static void __intel_cqm_pick_and_rotate(struct perf_event *next)
+{
+ struct perf_event *rotor;
+ unsigned int rmid;
+
+ lockdep_assert_held(&cache_mutex);
+
+ rotor = list_first_entry(&cache_groups, struct perf_event,
+ hw.cqm_groups_entry);
+
+ /*
+ * The group at the front of the list should always have a valid
+ * RMID. If it doesn't then no groups have RMIDs assigned and we
+ * don't need to rotate the list.
+ */
+ if (next == rotor)
+ return;
+
+ rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
+ __put_rmid(rmid);
+
+ list_rotate_left(&cache_groups);
+}
+
+/*
+ * Deallocate the RMIDs from any events that conflict with @event, and
+ * place them on the back of the group list.
+ */
+static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
+{
+ struct perf_event *group, *g;
+ unsigned int rmid;
+
+ lockdep_assert_held(&cache_mutex);
+
+ list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) {
+ if (group == event)
+ continue;
+
+ rmid = group->hw.cqm_rmid;
+
+ /*
+ * Skip events that don't have a valid RMID.
+ */
+ if (!__rmid_valid(rmid))
+ continue;
+
+ /*
+ * No conflict? No problem! Leave the event alone.
+ */
+ if (!__conflict_event(group, event))
+ continue;
+
+ intel_cqm_xchg_rmid(group, INVALID_RMID);
+ __put_rmid(rmid);
+ }
+}
+
+/*
+ * Attempt to rotate the groups and assign new RMIDs.
+ *
+ * We rotate for two reasons:
+ * 1. To handle the scheduling of conflicting events
+ * 2. To recycle RMIDs
+ *
+ * Rotating RMIDs is complicated because the hardware doesn't give us
+ * any clues.
+ *
+ * There are problems with the hardware interface: when you change the
+ * task:RMID map cachelines retain their 'old' tags, giving a skewed
+ * picture. In order to work around this, we must always keep one free
+ * RMID - intel_cqm_rotation_rmid.
+ *
+ * Rotation works by taking away an RMID from a group (the old RMID),
+ * and assigning the free RMID to another group (the new RMID). We must
+ * then wait for the old RMID to not be used (no cachelines tagged).
+ * This ensures that all cachelines are tagged with 'active' RMIDs. At
+ * this point we can start reading values for the new RMID and treat the
+ * old RMID as the free RMID for the next rotation.
+ *
+ * Return %true or %false depending on whether we did any rotating.
+ */
+static bool __intel_cqm_rmid_rotate(void)
+{
+ struct perf_event *group, *start = NULL;
+ unsigned int threshold_limit;
+ unsigned int nr_needed = 0;
+ unsigned int nr_available;
+ bool rotated = false;
+
+ mutex_lock(&cache_mutex);
+
+again:
+ /*
+ * Fast path through this function if there are no groups and no
+ * RMIDs that need cleaning.
+ */
+ if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru))
+ goto out;
+
+ list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) {
+ if (!__rmid_valid(group->hw.cqm_rmid)) {
+ if (!start)
+ start = group;
+ nr_needed++;
+ }
+ }
+
+ /*
+ * We have some event groups, but they all have RMIDs assigned
+ * and no RMIDs need cleaning.
+ */
+ if (!nr_needed && list_empty(&cqm_rmid_limbo_lru))
+ goto out;
+
+ if (!nr_needed)
+ goto stabilize;
+
+ /*
+ * We have more event groups without RMIDs than available RMIDs,
+ * or we have event groups that conflict with the ones currently
+ * scheduled.
+ *
+ * We force deallocate the rmid of the group at the head of
+ * cache_groups. The first event group without an RMID then gets
+ * assigned intel_cqm_rotation_rmid. This ensures we always make
+ * forward progress.
+ *
+ * Rotate the cache_groups list so the previous head is now the
+ * tail.
+ */
+ __intel_cqm_pick_and_rotate(start);
+
+ /*
+ * If the rotation is going to succeed, reduce the threshold so
+ * that we don't needlessly reuse dirty RMIDs.
+ */
+ if (__rmid_valid(intel_cqm_rotation_rmid)) {
+ intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid);
+ intel_cqm_rotation_rmid = __get_rmid();
+
+ intel_cqm_sched_out_conflicting_events(start);
+
+ if (__intel_cqm_threshold)
+ __intel_cqm_threshold--;
+ }
+
+ rotated = true;
+
+stabilize:
+ /*
+ * We now need to stabilize the RMID we freed above (if any) to
+ * ensure that the next time we rotate we have an RMID with zero
+ * occupancy value.
+ *
+ * Alternatively, if we didn't need to perform any rotation,
+ * we'll have a bunch of RMIDs in limbo that need stabilizing.
+ */
+ threshold_limit = __intel_cqm_max_threshold / cqm_l3_scale;
+
+ while (intel_cqm_rmid_stabilize(&nr_available) &&
+ __intel_cqm_threshold < threshold_limit) {
+ unsigned int steal_limit;
+
+ /*
+ * Don't spin if nobody is actively waiting for an RMID,
+ * the rotation worker will be kicked as soon as an
+ * event needs an RMID anyway.
+ */
+ if (!nr_needed)
+ break;
+
+ /* Allow max 25% of RMIDs to be in limbo. */
+ steal_limit = (cqm_max_rmid + 1) / 4;
+
+ /*
+ * We failed to stabilize any RMIDs so our rotation
+ * logic is now stuck. In order to make forward progress
+ * we have a few options:
+ *
+ * 1. rotate ("steal") another RMID
+ * 2. increase the threshold
+ * 3. do nothing
+ *
+ * We do 1. until we hit the steal limit, and only then resort to 2.
+ *
+ * The steal limit prevents all RMIDs ending up on the
+ * limbo list. This can happen if every RMID has a
+ * non-zero occupancy above threshold_limit, and the
+ * occupancy values aren't dropping fast enough.
+ *
+ * Note that there is prioritisation at work here - we'd
+ * rather increase the number of RMIDs on the limbo list
+ * than increase the threshold, because increasing the
+ * threshold skews the event data (because we reuse
+ * dirty RMIDs) - threshold bumps are a last resort.
+ */
+ if (nr_available < steal_limit)
+ goto again;
+
+ __intel_cqm_threshold++;
+ }
+
+out:
+ mutex_unlock(&cache_mutex);
+ return rotated;
+}
+
+static void intel_cqm_rmid_rotate(struct work_struct *work);
+
+static DECLARE_DELAYED_WORK(intel_cqm_rmid_work, intel_cqm_rmid_rotate);
+
+static struct pmu intel_cqm_pmu;
+
+static void intel_cqm_rmid_rotate(struct work_struct *work)
+{
+ unsigned long delay;
+
+ __intel_cqm_rmid_rotate();
+
+ delay = msecs_to_jiffies(intel_cqm_pmu.hrtimer_interval_ms);
+ schedule_delayed_work(&intel_cqm_rmid_work, delay);
+}
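
For readers unfamiliar with the pattern, a delayed work item that re-arms itself from its own handler, as intel_cqm_rmid_rotate() does above, looks like this as a standalone module. This is an illustrative skeleton only; the demo_* names are made up.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_work, demo_fn);

static void demo_fn(struct work_struct *work)
{
	pr_info("periodic pass\n");
	/* Re-arm from within the handler, as the rotation worker does. */
	schedule_delayed_work(&demo_work, msecs_to_jiffies(1000));
}

static int __init demo_init(void)
{
	schedule_delayed_work(&demo_work, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Waits for a running pass and stops the self-rearming chain. */
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");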
+
+/*
+ * Find a group and set up the RMID.
+ *
+ * If we're part of a group, we use the group's RMID.
+ */
+static void intel_cqm_setup_event(struct perf_event *event,
+ struct perf_event **group)
+{
+ struct perf_event *iter;
+ unsigned int rmid;
+ bool conflict = false;
+
+ list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
+ rmid = iter->hw.cqm_rmid;
+
+ if (__match_event(iter, event)) {
+ /* All tasks in a group share an RMID */
+ event->hw.cqm_rmid = rmid;
+ *group = iter;
+ return;
+ }
+
+ /*
+ * We only care about conflicts for events that are
+ * actually scheduled in (and hence have a valid RMID).
+ */
+ if (__conflict_event(iter, event) && __rmid_valid(rmid))
+ conflict = true;
+ }
+
+ if (conflict)
+ rmid = INVALID_RMID;
+ else
+ rmid = __get_rmid();
+
+ event->hw.cqm_rmid = rmid;
+}
+
+static void intel_cqm_event_read(struct perf_event *event)
+{
+ unsigned long flags;
+ unsigned int rmid;
+ u64 val;
+
+ /*
+ * Task events are handled by intel_cqm_event_count().
+ */
+ if (event->cpu == -1)
+ return;
+
+ raw_spin_lock_irqsave(&cache_lock, flags);
+ rmid = event->hw.cqm_rmid;
+
+ if (!__rmid_valid(rmid))
+ goto out;
+
+ val = __rmid_read(rmid);
+
+ /*
+ * Ignore this reading on error states and do not update the value.
+ */
+ if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
+ goto out;
+
+ local64_set(&event->count, val);
+out:
+ raw_spin_unlock_irqrestore(&cache_lock, flags);
+}
+
+static void __intel_cqm_event_count(void *info)
+{
+ struct rmid_read *rr = info;
+ u64 val;
+
+ val = __rmid_read(rr->rmid);
+
+ if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
+ return;
+
+ atomic64_add(val, &rr->value);
+}
+
+static inline bool cqm_group_leader(struct perf_event *event)
+{
+ return !list_empty(&event->hw.cqm_groups_entry);
+}
+
+static u64 intel_cqm_event_count(struct perf_event *event)
+{
+ unsigned long flags;
+ struct rmid_read rr = {
+ .value = ATOMIC64_INIT(0),
+ };
+
+ /*
+ * We only need to worry about task events. System-wide events
+ * are handled as usual, i.e. entirely with
+ * intel_cqm_event_read().
+ */
+ if (event->cpu != -1)
+ return __perf_event_count(event);
+
+ /*
+ * Only the group leader gets to report values. This stops us
+ * reporting duplicate values to userspace, and gives us a clear
+ * rule for which task gets to report the values.
+ *
+ * Note that it is impossible to attribute these values to
+ * specific packages - we forfeit that ability when we create
+ * task events.
+ */
+ if (!cqm_group_leader(event))
+ return 0;
+
+ /*
+ * Notice that we don't perform the reading of an RMID
+ * atomically, because we can't hold a spin lock across the
+ * IPIs.
+ *
+ * Speculatively perform the read, since @event might be
+ * assigned a different (possibly invalid) RMID while we're
+ * busy performing the IPI calls. It's therefore necessary to
+ * check @event's RMID afterwards, and if it has changed,
+ * discard the result of the read.
+ */
+ rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid);
+
+ if (!__rmid_valid(rr.rmid))
+ goto out;
+
+ on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, &rr, 1);
+
+ raw_spin_lock_irqsave(&cache_lock, flags);
+ if (event->hw.cqm_rmid == rr.rmid)
+ local64_set(&event->count, atomic64_read(&rr.value));
+ raw_spin_unlock_irqrestore(&cache_lock, flags);
+out:
+ return __perf_event_count(event);
+}
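
From userspace, the count produced by the path above is consumed with an ordinary read() on a perf event fd. The following is a minimal sketch that opens a task-scoped llc_occupancy event; the sysfs path and the numeric config are assumptions that only hold once this driver has registered (config=1 matches the event=0x01 format exported by the driver).

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

/* Present only when the intel_cqm PMU has registered. */
#define CQM_TYPE_PATH "/sys/bus/event_source/devices/intel_cqm/type"

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	char buf[16] = "";
	uint64_t count;
	int fd, tfd;

	tfd = open(CQM_TYPE_PATH, O_RDONLY);
	if (tfd < 0 || read(tfd, buf, sizeof(buf) - 1) <= 0) {
		perror("intel_cqm not available");
		return 1;
	}
	close(tfd);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = atoi(buf);	/* dynamic PMU type from sysfs */
	attr.config = 1;	/* event=0x01, llc_occupancy */

	fd = perf_event_open(&attr, 0, -1, -1, 0); /* this task, any cpu */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("llc_occupancy: %llu bytes\n",
		       (unsigned long long)count);
	close(fd);
	return 0;
}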
+
+static void intel_cqm_event_start(struct perf_event *event, int mode)
+{
+ struct intel_cqm_state *state = this_cpu_ptr(&cqm_state);
+ unsigned int rmid = event->hw.cqm_rmid;
+ unsigned long flags;
+
+ if (!(event->hw.cqm_state & PERF_HES_STOPPED))
+ return;
+
+ event->hw.cqm_state &= ~PERF_HES_STOPPED;
+
+ raw_spin_lock_irqsave(&state->lock, flags);
+
+ if (state->cnt++)
+ WARN_ON_ONCE(state->rmid != rmid);
+ else
+ WARN_ON_ONCE(state->rmid);
+
+ state->rmid = rmid;
+ wrmsrl(MSR_IA32_PQR_ASSOC, state->rmid);
+
+ raw_spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static void intel_cqm_event_stop(struct perf_event *event, int mode)
+{
+ struct intel_cqm_state *state = this_cpu_ptr(&cqm_state);
+ unsigned long flags;
+
+ if (event->hw.cqm_state & PERF_HES_STOPPED)
+ return;
+
+ event->hw.cqm_state |= PERF_HES_STOPPED;
+
+ raw_spin_lock_irqsave(&state->lock, flags);
+ intel_cqm_event_read(event);
+
+ if (!--state->cnt) {
+ state->rmid = 0;
+ wrmsrl(MSR_IA32_PQR_ASSOC, 0);
+ } else {
+ WARN_ON_ONCE(!state->rmid);
+ }
+
+ raw_spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int intel_cqm_event_add(struct perf_event *event, int mode)
+{
+ unsigned long flags;
+ unsigned int rmid;
+
+ raw_spin_lock_irqsave(&cache_lock, flags);
+
+ event->hw.cqm_state = PERF_HES_STOPPED;
+ rmid = event->hw.cqm_rmid;
+
+ if (__rmid_valid(rmid) && (mode & PERF_EF_START))
+ intel_cqm_event_start(event, mode);
+
+ raw_spin_unlock_irqrestore(&cache_lock, flags);
+
+ return 0;
+}
+
+static void intel_cqm_event_del(struct perf_event *event, int mode)
+{
+ intel_cqm_event_stop(event, mode);
+}
+
+static void intel_cqm_event_destroy(struct perf_event *event)
+{
+ struct perf_event *group_other = NULL;
+
+ mutex_lock(&cache_mutex);
+
+ /*
+ * If there's another event in this group...
+ */
+ if (!list_empty(&event->hw.cqm_group_entry)) {
+ group_other = list_first_entry(&event->hw.cqm_group_entry,
+ struct perf_event,
+ hw.cqm_group_entry);
+ list_del(&event->hw.cqm_group_entry);
+ }
+
+ /*
+ * And if we're the group leader...
+ */
+ if (cqm_group_leader(event)) {
+ /*
+ * If there was a group_other, make that leader, otherwise
+ * destroy the group and return the RMID.
+ */
+ if (group_other) {
+ list_replace(&event->hw.cqm_groups_entry,
+ &group_other->hw.cqm_groups_entry);
+ } else {
+ unsigned int rmid = event->hw.cqm_rmid;
+
+ if (__rmid_valid(rmid))
+ __put_rmid(rmid);
+ list_del(&event->hw.cqm_groups_entry);
+ }
+ }
+
+ mutex_unlock(&cache_mutex);
+}
+
+static int intel_cqm_event_init(struct perf_event *event)
+{
+ struct perf_event *group = NULL;
+ bool rotate = false;
+
+ if (event->attr.type != intel_cqm_pmu.type)
+ return -ENOENT;
+
+ if (event->attr.config & ~QOS_EVENT_MASK)
+ return -EINVAL;
+
+ /* unsupported modes and filters */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest ||
+ event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&event->hw.cqm_group_entry);
+ INIT_LIST_HEAD(&event->hw.cqm_groups_entry);
+
+ event->destroy = intel_cqm_event_destroy;
+
+ mutex_lock(&cache_mutex);
+
+ /* Will also set rmid */
+ intel_cqm_setup_event(event, &group);
+
+ if (group) {
+ list_add_tail(&event->hw.cqm_group_entry,
+ &group->hw.cqm_group_entry);
+ } else {
+ list_add_tail(&event->hw.cqm_groups_entry,
+ &cache_groups);
+
+ /*
+ * All RMIDs are either in use or have recently been
+ * used. Kick the rotation worker to clean/free some.
+ *
+ * We only do this for the group leader, rather than for
+ * every event in a group to save on needless work.
+ */
+ if (!__rmid_valid(event->hw.cqm_rmid))
+ rotate = true;
+ }
+
+ mutex_unlock(&cache_mutex);
+
+ if (rotate)
+ schedule_delayed_work(&intel_cqm_rmid_work, 0);
+
+ return 0;
+}
+
+EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");
+EVENT_ATTR_STR(llc_occupancy.per-pkg, intel_cqm_llc_pkg, "1");
+EVENT_ATTR_STR(llc_occupancy.unit, intel_cqm_llc_unit, "Bytes");
+EVENT_ATTR_STR(llc_occupancy.scale, intel_cqm_llc_scale, NULL);
+EVENT_ATTR_STR(llc_occupancy.snapshot, intel_cqm_llc_snapshot, "1");
+
+static struct attribute *intel_cqm_events_attr[] = {
+ EVENT_PTR(intel_cqm_llc),
+ EVENT_PTR(intel_cqm_llc_pkg),
+ EVENT_PTR(intel_cqm_llc_unit),
+ EVENT_PTR(intel_cqm_llc_scale),
+ EVENT_PTR(intel_cqm_llc_snapshot),
+ NULL,
+};
+
+static struct attribute_group intel_cqm_events_group = {
+ .name = "events",
+ .attrs = intel_cqm_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+static struct attribute *intel_cqm_formats_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group intel_cqm_format_group = {
+ .name = "format",
+ .attrs = intel_cqm_formats_attr,
+};
+
+static ssize_t
+max_recycle_threshold_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ ssize_t rv;
+
+ mutex_lock(&cache_mutex);
+ rv = snprintf(page, PAGE_SIZE-1, "%u\n", __intel_cqm_max_threshold);
+ mutex_unlock(&cache_mutex);
+
+ return rv;
+}
+
+static ssize_t
+max_recycle_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int bytes, cachelines;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &bytes);
+ if (ret)
+ return ret;
+
+ mutex_lock(&cache_mutex);
+
+ __intel_cqm_max_threshold = bytes;
+ cachelines = bytes / cqm_l3_scale;
+
+ /*
+ * The new maximum takes effect immediately.
+ */
+ if (__intel_cqm_threshold > cachelines)
+ __intel_cqm_threshold = cachelines;
+
+ mutex_unlock(&cache_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(max_recycle_threshold);
+
+static struct attribute *intel_cqm_attrs[] = {
+ &dev_attr_max_recycle_threshold.attr,
+ NULL,
+};
+
+static const struct attribute_group intel_cqm_group = {
+ .attrs = intel_cqm_attrs,
+};
+
+static const struct attribute_group *intel_cqm_attr_groups[] = {
+ &intel_cqm_events_group,
+ &intel_cqm_format_group,
+ &intel_cqm_group,
+ NULL,
+};
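
The recycling threshold exported above is an ordinary sysfs knob. A small sketch of reading and raising it follows; the path is an assumption that only holds once the intel_cqm PMU has registered, and the write requires root.

#include <stdio.h>

#define THRESH "/sys/bus/event_source/devices/intel_cqm/max_recycle_threshold"

int main(void)
{
	unsigned int bytes;
	FILE *f = fopen(THRESH, "r+");

	if (!f) {
		perror(THRESH);
		return 1;
	}
	if (fscanf(f, "%u", &bytes) == 1)
		printf("current max recycle threshold: %u bytes\n", bytes);

	/* Raising the limit lets rotation tolerate dirtier RMIDs. */
	rewind(f);
	fprintf(f, "%u\n", bytes * 2);
	fclose(f);
	return 0;
}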
+
+static struct pmu intel_cqm_pmu = {
+ .hrtimer_interval_ms = RMID_DEFAULT_QUEUE_TIME,
+ .attr_groups = intel_cqm_attr_groups,
+ .task_ctx_nr = perf_sw_context,
+ .event_init = intel_cqm_event_init,
+ .add = intel_cqm_event_add,
+ .del = intel_cqm_event_del,
+ .start = intel_cqm_event_start,
+ .stop = intel_cqm_event_stop,
+ .read = intel_cqm_event_read,
+ .count = intel_cqm_event_count,
+};
+
+static inline void cqm_pick_event_reader(int cpu)
+{
+ int phys_id = topology_physical_package_id(cpu);
+ int i;
+
+ for_each_cpu(i, &cqm_cpumask) {
+ if (phys_id == topology_physical_package_id(i))
+ return; /* already got reader for this socket */
+ }
+
+ cpumask_set_cpu(cpu, &cqm_cpumask);
+}
+
+static void intel_cqm_cpu_prepare(unsigned int cpu)
+{
+ struct intel_cqm_state *state = &per_cpu(cqm_state, cpu);
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ raw_spin_lock_init(&state->lock);
+ state->rmid = 0;
+ state->cnt = 0;
+
+ WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
+ WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
+}
+
+static void intel_cqm_cpu_exit(unsigned int cpu)
+{
+ int phys_id = topology_physical_package_id(cpu);
+ int i;
+
+ /*
+ * Is @cpu a designated cqm reader?
+ */
+ if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
+ return;
+
+ for_each_online_cpu(i) {
+ if (i == cpu)
+ continue;
+
+ if (phys_id == topology_physical_package_id(i)) {
+ cpumask_set_cpu(i, &cqm_cpumask);
+ break;
+ }
+ }
+}
+
+static int intel_cqm_cpu_notifier(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ intel_cqm_cpu_prepare(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ intel_cqm_cpu_exit(cpu);
+ break;
+ case CPU_STARTING:
+ cqm_pick_event_reader(cpu);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static const struct x86_cpu_id intel_cqm_match[] = {
+ { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_OCCUP_LLC },
+ {}
+};
+
+static int __init intel_cqm_init(void)
+{
+ char *str, scale[20];
+ int i, cpu, ret;
+
+ if (!x86_match_cpu(intel_cqm_match))
+ return -ENODEV;
+
+ cqm_l3_scale = boot_cpu_data.x86_cache_occ_scale;
+
+ /*
+ * It's possible that not all resources support the same number
+ * of RMIDs. Instead of making scheduling much more complicated
+ * (where we have to match a task's RMID to a cpu that supports
+ * that many RMIDs) just find the minimum number of RMIDs
+ * supported across all cpus.
+ *
+ * Also, check that the scales match on all cpus.
+ */
+ cpu_notifier_register_begin();
+
+ for_each_online_cpu(cpu) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->x86_cache_max_rmid < cqm_max_rmid)
+ cqm_max_rmid = c->x86_cache_max_rmid;
+
+ if (c->x86_cache_occ_scale != cqm_l3_scale) {
+ pr_err("Multiple LLC scale values, disabling\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ /*
+ * A reasonable upper limit on the max threshold is the number
+ * of lines tagged per RMID if all RMIDs have the same number of
+ * lines tagged in the LLC.
+ *
+ * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
+ */
+ __intel_cqm_max_threshold =
+ boot_cpu_data.x86_cache_size * 1024 / (cqm_max_rmid + 1);
+
+ snprintf(scale, sizeof(scale), "%u", cqm_l3_scale);
+ str = kstrdup(scale, GFP_KERNEL);
+ if (!str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ event_attr_intel_cqm_llc_scale.event_str = str;
+
+ ret = intel_cqm_setup_rmid_cache();
+ if (ret)
+ goto out;
+
+ for_each_online_cpu(i) {
+ intel_cqm_cpu_prepare(i);
+ cqm_pick_event_reader(i);
+ }
+
+ __perf_cpu_notifier(intel_cqm_cpu_notifier);
+
+ ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
+ if (ret)
+ pr_err("Intel CQM perf registration failed: %d\n", ret);
+ else
+ pr_info("Intel CQM monitoring enabled\n");
+
+out:
+ cpu_notifier_register_done();
+
+ return ret;
+}
+device_initcall(intel_cqm_init);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 0739833..ca69ea5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -461,7 +461,8 @@ void intel_pmu_enable_bts(u64 config)
debugctlmsr |= DEBUGCTLMSR_TR;
debugctlmsr |= DEBUGCTLMSR_BTS;
- debugctlmsr |= DEBUGCTLMSR_BTINT;
+ if (config & ARCH_PERFMON_EVENTSEL_INT)
+ debugctlmsr |= DEBUGCTLMSR_BTINT;
if (!(config & ARCH_PERFMON_EVENTSEL_OS))
debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;
@@ -611,6 +612,10 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
EVENT_CONSTRAINT_END
@@ -622,6 +627,10 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
EVENT_CONSTRAINT_END
@@ -633,16 +642,16 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
- INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
- INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
- INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
EVENT_CONSTRAINT_END
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 58f1a94..94e5b50 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -39,6 +39,7 @@ static enum {
#define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */
#define LBR_FAR_BIT 8 /* do not capture far branches */
+#define LBR_CALL_STACK_BIT 9 /* enable call stack */
#define LBR_KERNEL (1 << LBR_KERNEL_BIT)
#define LBR_USER (1 << LBR_USER_BIT)
@@ -49,6 +50,7 @@ static enum {
#define LBR_REL_JMP (1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP (1 << LBR_IND_JMP_BIT)
#define LBR_FAR (1 << LBR_FAR_BIT)
+#define LBR_CALL_STACK (1 << LBR_CALL_STACK_BIT)
#define LBR_PLM (LBR_KERNEL | LBR_USER)
@@ -69,33 +71,31 @@ static enum {
#define LBR_FROM_FLAG_IN_TX (1ULL << 62)
#define LBR_FROM_FLAG_ABORT (1ULL << 61)
-#define for_each_branch_sample_type(x) \
- for ((x) = PERF_SAMPLE_BRANCH_USER; \
- (x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1)
-
/*
* x86control flow change classification
* x86control flow changes include branches, interrupts, traps, faults
*/
enum {
- X86_BR_NONE = 0, /* unknown */
-
- X86_BR_USER = 1 << 0, /* branch target is user */
- X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
-
- X86_BR_CALL = 1 << 2, /* call */
- X86_BR_RET = 1 << 3, /* return */
- X86_BR_SYSCALL = 1 << 4, /* syscall */
- X86_BR_SYSRET = 1 << 5, /* syscall return */
- X86_BR_INT = 1 << 6, /* sw interrupt */
- X86_BR_IRET = 1 << 7, /* return from interrupt */
- X86_BR_JCC = 1 << 8, /* conditional */
- X86_BR_JMP = 1 << 9, /* jump */
- X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
- X86_BR_IND_CALL = 1 << 11,/* indirect calls */
- X86_BR_ABORT = 1 << 12,/* transaction abort */
- X86_BR_IN_TX = 1 << 13,/* in transaction */
- X86_BR_NO_TX = 1 << 14,/* not in transaction */
+ X86_BR_NONE = 0, /* unknown */
+
+ X86_BR_USER = 1 << 0, /* branch target is user */
+ X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
+
+ X86_BR_CALL = 1 << 2, /* call */
+ X86_BR_RET = 1 << 3, /* return */
+ X86_BR_SYSCALL = 1 << 4, /* syscall */
+ X86_BR_SYSRET = 1 << 5, /* syscall return */
+ X86_BR_INT = 1 << 6, /* sw interrupt */
+ X86_BR_IRET = 1 << 7, /* return from interrupt */
+ X86_BR_JCC = 1 << 8, /* conditional */
+ X86_BR_JMP = 1 << 9, /* jump */
+ X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
+ X86_BR_IND_CALL = 1 << 11,/* indirect calls */
+ X86_BR_ABORT = 1 << 12,/* transaction abort */
+ X86_BR_IN_TX = 1 << 13,/* in transaction */
+ X86_BR_NO_TX = 1 << 14,/* not in transaction */
+ X86_BR_ZERO_CALL = 1 << 15,/* zero length call */
+ X86_BR_CALL_STACK = 1 << 16,/* call stack */
};
#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
@@ -112,13 +112,15 @@ enum {
X86_BR_JMP |\
X86_BR_IRQ |\
X86_BR_ABORT |\
- X86_BR_IND_CALL)
+ X86_BR_IND_CALL |\
+ X86_BR_ZERO_CALL)
#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
#define X86_BR_ANY_CALL \
(X86_BR_CALL |\
X86_BR_IND_CALL |\
+ X86_BR_ZERO_CALL |\
X86_BR_SYSCALL |\
X86_BR_IRQ |\
X86_BR_INT)
@@ -130,17 +132,32 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
* otherwise it becomes near impossible to get a reliable stack.
*/
-static void __intel_pmu_lbr_enable(void)
+static void __intel_pmu_lbr_enable(bool pmi)
{
- u64 debugctl;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ u64 debugctl, lbr_select = 0, orig_debugctl;
- if (cpuc->lbr_sel)
- wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);
+ /*
+ * No need to reprogram LBR_SELECT in a PMI, as it
+ * did not change.
+ */
+ if (cpuc->lbr_sel && !pmi) {
+ lbr_select = cpuc->lbr_sel->config;
+ wrmsrl(MSR_LBR_SELECT, lbr_select);
+ }
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
- debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ orig_debugctl = debugctl;
+ debugctl |= DEBUGCTLMSR_LBR;
+ /*
+ * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
+ * If FREEZE_LBRS_ON_PMI is set, a PMI near call/return instructions
+ * may cause superfluous increase/decrease of LBR_TOS.
+ */
+ if (!(lbr_select & LBR_CALL_STACK))
+ debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
+ if (orig_debugctl != debugctl)
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void __intel_pmu_lbr_disable(void)
@@ -181,9 +198,116 @@ void intel_pmu_lbr_reset(void)
intel_pmu_lbr_reset_64();
}
+/*
+ * TOS = most recently recorded branch
+ */
+static inline u64 intel_pmu_lbr_tos(void)
+{
+ u64 tos;
+
+ rdmsrl(x86_pmu.lbr_tos, tos);
+ return tos;
+}
+
+enum {
+ LBR_NONE,
+ LBR_VALID,
+};
+
+static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
+{
+ int i;
+ unsigned lbr_idx, mask;
+ u64 tos;
+
+ if (task_ctx->lbr_callstack_users == 0 ||
+ task_ctx->lbr_stack_state == LBR_NONE) {
+ intel_pmu_lbr_reset();
+ return;
+ }
+
+ mask = x86_pmu.lbr_nr - 1;
+ tos = intel_pmu_lbr_tos();
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ lbr_idx = (tos - i) & mask;
+ wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+ wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+ }
+ task_ctx->lbr_stack_state = LBR_NONE;
+}
+
+static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
+{
+ int i;
+ unsigned lbr_idx, mask;
+ u64 tos;
+
+ if (task_ctx->lbr_callstack_users == 0) {
+ task_ctx->lbr_stack_state = LBR_NONE;
+ return;
+ }
+
+ mask = x86_pmu.lbr_nr - 1;
+ tos = intel_pmu_lbr_tos();
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ lbr_idx = (tos - i) & mask;
+ rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+ rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+ }
+ task_ctx->lbr_stack_state = LBR_VALID;
+}
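
The save and restore loops above both rely on the LBR stack being a power-of-two ring, so that (tos - i) & mask walks from the newest entry backwards and wraps cleanly past slot zero. A minimal sketch of that index arithmetic, with a made-up depth and TOS value:

#include <stdio.h>

int main(void)
{
	const unsigned int nr = 16;	/* LBR depth, a power of two */
	const unsigned int mask = nr - 1;
	unsigned int tos = 3;		/* top-of-stack index from the MSR */
	unsigned int i;

	/* Walk the ring from newest to oldest; (tos - i) & mask wraps
	 * cleanly past zero because nr is a power of two. */
	for (i = 0; i < nr; i++)
		printf("slot %2u -> lbr_idx %2u\n", i, (tos - i) & mask);
	return 0;
}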
+
+void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct x86_perf_task_context *task_ctx;
+
+ if (!x86_pmu.lbr_nr)
+ return;
+
+ /*
+ * If the LBR callstack feature is enabled and the stack was saved when
+ * the task was scheduled out, restore the stack. Otherwise flush
+ * the LBR stack.
+ */
+ task_ctx = ctx ? ctx->task_ctx_data : NULL;
+ if (task_ctx) {
+ if (sched_in) {
+ __intel_pmu_lbr_restore(task_ctx);
+ cpuc->lbr_context = ctx;
+ } else {
+ __intel_pmu_lbr_save(task_ctx);
+ }
+ return;
+ }
+
+ /*
+ * When sampling the branch stack in system-wide mode, it may be
+ * necessary to flush the stack on context switch. This happens
+ * when the branch stack does not tag its entries with the pid
+ * of the current task. Otherwise it becomes impossible to
+ * associate a branch entry with a task. This ambiguity is more
+ * likely to appear when the branch stack supports priv level
+ * filtering and the user sets it to monitor only at the user
+ * level (which could be a useful measurement in system-wide
+ * mode). In that case, the risk is high of having a branch
+ * stack with branch from multiple tasks.
+ */
+ if (sched_in) {
+ intel_pmu_lbr_reset();
+ cpuc->lbr_context = ctx;
+ }
+}
+
+static inline bool branch_user_callstack(unsigned br_sel)
+{
+ return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
+}
+
void intel_pmu_lbr_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct x86_perf_task_context *task_ctx;
if (!x86_pmu.lbr_nr)
return;
@@ -198,18 +322,33 @@ void intel_pmu_lbr_enable(struct perf_event *event)
}
cpuc->br_sel = event->hw.branch_reg.reg;
+ if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
+ event->ctx->task_ctx_data) {
+ task_ctx = event->ctx->task_ctx_data;
+ task_ctx->lbr_callstack_users++;
+ }
+
cpuc->lbr_users++;
+ perf_sched_cb_inc(event->ctx->pmu);
}
void intel_pmu_lbr_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct x86_perf_task_context *task_ctx;
if (!x86_pmu.lbr_nr)
return;
+ if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
+ event->ctx->task_ctx_data) {
+ task_ctx = event->ctx->task_ctx_data;
+ task_ctx->lbr_callstack_users--;
+ }
+
cpuc->lbr_users--;
WARN_ON_ONCE(cpuc->lbr_users < 0);
+ perf_sched_cb_dec(event->ctx->pmu);
if (cpuc->enabled && !cpuc->lbr_users) {
__intel_pmu_lbr_disable();
@@ -218,12 +357,12 @@ void intel_pmu_lbr_disable(struct perf_event *event)
}
}
-void intel_pmu_lbr_enable_all(void)
+void intel_pmu_lbr_enable_all(bool pmi)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (cpuc->lbr_users)
- __intel_pmu_lbr_enable();
+ __intel_pmu_lbr_enable(pmi);
}
void intel_pmu_lbr_disable_all(void)
@@ -234,18 +373,6 @@ void intel_pmu_lbr_disable_all(void)
__intel_pmu_lbr_disable();
}
-/*
- * TOS = most recently recorded branch
- */
-static inline u64 intel_pmu_lbr_tos(void)
-{
- u64 tos;
-
- rdmsrl(x86_pmu.lbr_tos, tos);
-
- return tos;
-}
-
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
unsigned long mask = x86_pmu.lbr_nr - 1;
@@ -350,7 +477,7 @@ void intel_pmu_lbr_read(void)
* - in case there is no HW filter
* - in case the HW filter has errata or limitations
*/
-static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
+static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
u64 br_type = event->attr.branch_sample_type;
int mask = 0;
@@ -387,11 +514,21 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
if (br_type & PERF_SAMPLE_BRANCH_COND)
mask |= X86_BR_JCC;
+ if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
+ if (!x86_pmu_has_lbr_callstack())
+ return -EOPNOTSUPP;
+ if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
+ return -EINVAL;
+ mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
+ X86_BR_CALL_STACK;
+ }
+
/*
* stash actual user request into reg, it may
* be used by fixup code for some CPU
*/
event->hw.branch_reg.reg = mask;
+ return 0;
}
/*
@@ -403,14 +540,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
struct hw_perf_event_extra *reg;
u64 br_type = event->attr.branch_sample_type;
- u64 mask = 0, m;
- u64 v;
+ u64 mask = 0, v;
+ int i;
- for_each_branch_sample_type(m) {
- if (!(br_type & m))
+ for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
+ if (!(br_type & (1ULL << i)))
continue;
- v = x86_pmu.lbr_sel_map[m];
+ v = x86_pmu.lbr_sel_map[i];
if (v == LBR_NOT_SUPP)
return -EOPNOTSUPP;
@@ -420,8 +557,12 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
reg = &event->hw.branch_reg;
reg->idx = EXTRA_REG_LBR;
- /* LBR_SELECT operates in suppress mode so invert mask */
- reg->config = ~mask & x86_pmu.lbr_sel_mask;
+ /*
+ * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
+ * in suppress mode. So LBR_SELECT should be set to
+ * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
+ */
+ reg->config = mask ^ x86_pmu.lbr_sel_mask;
return 0;
}
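
The XOR in reg->config above is a compact way to flip only the suppress-mode bits while leaving the rest of the mask intact. A small sketch verifying that mask ^ LBR_SEL_MASK equals the spelled-out form from the comment, assuming the nine-bit suppress mask described there:

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t LBR_SEL_MASK = 0x1ff;	/* low 9 suppress-mode bits */
	uint64_t mask;

	/* The driver writes mask ^ LBR_SEL_MASK; check it equals
	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK) for a range
	 * of sample masks, including bits above the suppress field. */
	for (mask = 0; mask < (1 << 10); mask++) {
		uint64_t spelled = (~mask & LBR_SEL_MASK) |
				   (mask & ~LBR_SEL_MASK);
		assert((mask ^ LBR_SEL_MASK) == spelled);
	}
	printf("identity holds\n");
	return 0;
}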
@@ -439,7 +580,9 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event)
/*
* setup SW LBR filter
*/
- intel_pmu_setup_sw_lbr_filter(event);
+ ret = intel_pmu_setup_sw_lbr_filter(event);
+ if (ret)
+ return ret;
/*
* setup HW LBR filter, if any
@@ -568,6 +711,12 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
ret = X86_BR_INT;
break;
case 0xe8: /* call near rel */
+ insn_get_immediate(&insn);
+ if (insn.immediate1.value == 0) {
+ /* zero length call */
+ ret = X86_BR_ZERO_CALL;
+ break;
+ }
case 0x9a: /* call far absolute */
ret = X86_BR_CALL;
break;
@@ -678,35 +827,49 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
/*
* Map interface branch filters onto LBR filters
*/
-static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
- [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY,
- [PERF_SAMPLE_BRANCH_USER] = LBR_USER,
- [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL,
- [PERF_SAMPLE_BRANCH_HV] = LBR_IGN,
- [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_REL_JMP
- | LBR_IND_JMP | LBR_FAR,
+static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
+ [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
+ [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
+ [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
+ [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
+ [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_REL_JMP
+ | LBR_IND_JMP | LBR_FAR,
/*
* NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
*/
- [PERF_SAMPLE_BRANCH_ANY_CALL] =
+ [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
/*
* NHM/WSM erratum: must include IND_JMP to capture IND_CALL
*/
- [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP,
- [PERF_SAMPLE_BRANCH_COND] = LBR_JCC,
+ [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
+ [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
};
-static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
- [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY,
- [PERF_SAMPLE_BRANCH_USER] = LBR_USER,
- [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL,
- [PERF_SAMPLE_BRANCH_HV] = LBR_IGN,
- [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_FAR,
- [PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL
- | LBR_FAR,
- [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL,
- [PERF_SAMPLE_BRANCH_COND] = LBR_JCC,
+static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
+ [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
+ [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
+ [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
+ [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
+ [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
+ [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
+ | LBR_FAR,
+ [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
+ [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
+};
+
+static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
+ [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
+ [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
+ [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
+ [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
+ [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
+ [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
+ | LBR_FAR,
+ [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
+ [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
+ [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
+ | LBR_RETURN | LBR_CALL_STACK,
};
/* core */
@@ -765,6 +928,20 @@ void __init intel_pmu_lbr_init_snb(void)
pr_cont("16-deep LBR, ");
}
+/* haswell */
+void intel_pmu_lbr_init_hsw(void)
+{
+ x86_pmu.lbr_nr = 16;
+ x86_pmu.lbr_tos = MSR_LBR_TOS;
+ x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
+ x86_pmu.lbr_to = MSR_LBR_NHM_TO;
+
+ x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
+ x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
+
+ pr_cont("16-deep LBR, ");
+}
+
/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
new file mode 100644
index 0000000..f277064
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
@@ -0,0 +1,1103 @@
+/*
+ * Intel(R) Processor Trace PMU driver for perf
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Intel PT is specified in the Intel Architecture Instruction Set Extensions
+ * Programming Reference:
+ * http://software.intel.com/en-us/intel-isa-extensions
+ */
+
+#undef DEBUG
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include <asm/perf_event.h>
+#include <asm/insn.h>
+#include <asm/io.h>
+
+#include "perf_event.h"
+#include "intel_pt.h"
+
+static DEFINE_PER_CPU(struct pt, pt_ctx);
+
+static struct pt_pmu pt_pmu;
+
+enum cpuid_regs {
+ CR_EAX = 0,
+ CR_ECX,
+ CR_EDX,
+ CR_EBX
+};
+
+/*
+ * Capabilities of Intel PT hardware, such as number of address bits or
+ * supported output schemes, are cached and exported to userspace as "caps"
+ * attribute group of pt pmu device
+ * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
+ * relevant bits together with intel_pt traces.
+ *
+ * These are necessary for both trace decoding (payloads_lip, contains address
+ * width encoded in IP-related packets), and event configuration (bitmasks with
+ * permitted values for certain bit fields).
+ */
+#define PT_CAP(_n, _l, _r, _m) \
+ [PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l, \
+ .reg = _r, .mask = _m }
+
+static struct pt_cap_desc {
+ const char *name;
+ u32 leaf;
+ u8 reg;
+ u32 mask;
+} pt_caps[] = {
+ PT_CAP(max_subleaf, 0, CR_EAX, 0xffffffff),
+ PT_CAP(cr3_filtering, 0, CR_EBX, BIT(0)),
+ PT_CAP(topa_output, 0, CR_ECX, BIT(0)),
+ PT_CAP(topa_multiple_entries, 0, CR_ECX, BIT(1)),
+ PT_CAP(payloads_lip, 0, CR_ECX, BIT(31)),
+};
+
+static u32 pt_cap_get(enum pt_capabilities cap)
+{
+ struct pt_cap_desc *cd = &pt_caps[cap];
+ u32 c = pt_pmu.caps[cd->leaf * 4 + cd->reg];
+ unsigned int shift = __ffs(cd->mask);
+
+ return (c & cd->mask) >> shift;
+}
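
pt_cap_get() extracts an arbitrary-width field by shifting the masked value down by the mask's lowest set bit. The same trick in a standalone sketch, using __builtin_ctz() in place of the kernel's __ffs() and a made-up register value:

#include <stdio.h>
#include <stdint.h>

/* Extract a capability field: mask it out, then shift down by the
 * mask's lowest set bit, exactly as pt_cap_get() does with __ffs(). */
static uint32_t field_get(uint32_t reg, uint32_t mask)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t ecx = 0x80000003;	/* made-up CPUID output */

	printf("bit0 cap:  %u\n", field_get(ecx, 1u << 0));  /* 1 */
	printf("bit1 cap:  %u\n", field_get(ecx, 1u << 1));  /* 1 */
	printf("bit31 cap: %u\n", field_get(ecx, 1u << 31)); /* 1 */
	return 0;
}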
+
+static ssize_t pt_cap_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea =
+ container_of(attr, struct dev_ext_attribute, attr);
+ enum pt_capabilities cap = (long)ea->var;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
+}
+
+static struct attribute_group pt_cap_group = {
+ .name = "caps",
+};
+
+PMU_FORMAT_ATTR(tsc, "config:10" );
+PMU_FORMAT_ATTR(noretcomp, "config:11" );
+
+static struct attribute *pt_formats_attr[] = {
+ &format_attr_tsc.attr,
+ &format_attr_noretcomp.attr,
+ NULL,
+};
+
+static struct attribute_group pt_format_group = {
+ .name = "format",
+ .attrs = pt_formats_attr,
+};
+
+static const struct attribute_group *pt_attr_groups[] = {
+ &pt_cap_group,
+ &pt_format_group,
+ NULL,
+};
+
+static int __init pt_pmu_hw_init(void)
+{
+ struct dev_ext_attribute *de_attrs;
+ struct attribute **attrs;
+ size_t size;
+ int ret;
+ long i;
+
+ attrs = NULL;
+ ret = -ENODEV;
+ if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
+ goto fail;
+
+ for (i = 0; i < PT_CPUID_LEAVES; i++) {
+ cpuid_count(20, i,
+ &pt_pmu.caps[CR_EAX + i*4],
+ &pt_pmu.caps[CR_EBX + i*4],
+ &pt_pmu.caps[CR_ECX + i*4],
+ &pt_pmu.caps[CR_EDX + i*4]);
+ }
+
+ ret = -ENOMEM;
+ size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
+ attrs = kzalloc(size, GFP_KERNEL);
+ if (!attrs)
+ goto fail;
+
+ size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
+ de_attrs = kzalloc(size, GFP_KERNEL);
+ if (!de_attrs)
+ goto fail;
+
+ for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
+ struct dev_ext_attribute *de_attr = de_attrs + i;
+
+ de_attr->attr.attr.name = pt_caps[i].name;
+
+		sysfs_attr_init(&de_attr->attr.attr);
+
+ de_attr->attr.attr.mode = S_IRUGO;
+ de_attr->attr.show = pt_cap_show;
+ de_attr->var = (void *)i;
+
+ attrs[i] = &de_attr->attr.attr;
+ }
+
+ pt_cap_group.attrs = attrs;
+
+ return 0;
+
+fail:
+ kfree(attrs);
+
+ return ret;
+}
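
The capability enumeration above reads CPUID leaf 20 (0x14). Userspace can perform the equivalent query with the compiler's <cpuid.h> helpers; a sketch, with the bit positions taken from the pt_caps[] table:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid(0, eax, ebx, ecx, edx);
	if (eax < 0x14) {
		puts("CPUID leaf 0x14 (Intel PT) not present");
		return 1;
	}

	/* Leaf 0x14 (decimal 20, as in the driver), subleaf 0. */
	__cpuid_count(0x14, 0, eax, ebx, ecx, edx);
	printf("max subleaf:      %u\n", eax);
	printf("cr3 filtering:    %u\n", ebx & 1);
	printf("ToPA output:      %u\n", ecx & 1);
	printf("multiple ToPA:    %u\n", (ecx >> 1) & 1);
	printf("payloads are LIP: %u\n", (ecx >> 31) & 1);
	return 0;
}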
+
+#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC)
+
+static bool pt_event_valid(struct perf_event *event)
+{
+ u64 config = event->attr.config;
+
+ if ((config & PT_CONFIG_MASK) != config)
+ return false;
+
+ return true;
+}
+
+/*
+ * PT configuration helpers
+ * These are all CPU-affine and operate on the local PT
+ */
+
+static bool pt_is_running(void)
+{
+ u64 ctl;
+
+ rdmsrl(MSR_IA32_RTIT_CTL, ctl);
+
+ return !!(ctl & RTIT_CTL_TRACEEN);
+}
+
+static void pt_config(struct perf_event *event)
+{
+ u64 reg;
+
+ reg = RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;
+
+ if (!event->attr.exclude_kernel)
+ reg |= RTIT_CTL_OS;
+ if (!event->attr.exclude_user)
+ reg |= RTIT_CTL_USR;
+
+ reg |= (event->attr.config & PT_CONFIG_MASK);
+
+ wrmsrl(MSR_IA32_RTIT_CTL, reg);
+}
+
+static void pt_config_start(bool start)
+{
+ u64 ctl;
+
+ rdmsrl(MSR_IA32_RTIT_CTL, ctl);
+ if (start)
+ ctl |= RTIT_CTL_TRACEEN;
+ else
+ ctl &= ~RTIT_CTL_TRACEEN;
+ wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+
+ /*
+ * A wrmsr that disables trace generation serializes other PT
+ * registers and causes all data packets to be written to memory,
+ * but a fence is required for the data to become globally visible.
+ *
+ * The below WMB, separating data store and aux_head store matches
+ * the consumer's RMB that separates aux_head load and data load.
+ */
+ if (!start)
+ wmb();
+}
+
+static void pt_config_buffer(void *buf, unsigned int topa_idx,
+ unsigned int output_off)
+{
+ u64 reg;
+
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));
+
+ reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);
+
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
+}
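
The value written to the output-mask MSR above packs three fields into one register: the low seven bits set, the table index at bit 7, and the byte offset within the current region at bit 32. A sketch of the packing and of the inverse decode performed later by pt_read_offset(), with made-up index and offset values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int topa_idx = 5;	 /* entry within the current table */
	unsigned int output_off = 0x340; /* bytes into the current region */

	/* Same layout pt_config_buffer() writes to the output-mask MSR. */
	uint64_t reg = 0x7f | ((uint64_t)topa_idx << 7) |
		       ((uint64_t)output_off << 32);

	/* And the inverse, as pt_read_offset() decodes it. */
	printf("idx=%u off=%#x\n",
	       (unsigned int)((reg & 0xffffff80) >> 7),
	       (unsigned int)(reg >> 32));
	return 0;
}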
+
+/*
+ * Keep ToPA table-related metadata on the same page as the actual table,
+ * taking up a few words from the top
+ */
+
+#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
+
+/**
+ * struct topa - page-sized ToPA table with metadata at the top
+ * @table: actual ToPA table entries, as understood by PT hardware
+ * @list: linkage to struct pt_buffer's list of tables
+ * @phys: physical address of this page
+ * @offset: offset of the first entry in this table in the buffer
+ * @size: total size of all entries in this table
+ * @last: index of the last initialized entry in this table
+ */
+struct topa {
+ struct topa_entry table[TENTS_PER_PAGE];
+ struct list_head list;
+ u64 phys;
+ u64 offset;
+ size_t size;
+ int last;
+};
+
+/* make -1 stand for the last table entry */
+#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])
+
+/**
+ * topa_alloc() - allocate page-sized ToPA table
+ * @cpu: CPU on which to allocate.
+ * @gfp: Allocation flags.
+ *
+ * Return: On success, return the pointer to ToPA table page.
+ */
+static struct topa *topa_alloc(int cpu, gfp_t gfp)
+{
+ int node = cpu_to_node(cpu);
+ struct topa *topa;
+ struct page *p;
+
+ p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
+ if (!p)
+ return NULL;
+
+ topa = page_address(p);
+ topa->last = 0;
+ topa->phys = page_to_phys(p);
+
+ /*
+	 * In case of single-entry ToPA, always put the self-referencing END
+ * link as the 2nd entry in the table
+ */
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
+ TOPA_ENTRY(topa, 1)->end = 1;
+ }
+
+ return topa;
+}
+
+/**
+ * topa_free() - free a page-sized ToPA table
+ * @topa: Table to deallocate.
+ */
+static void topa_free(struct topa *topa)
+{
+ free_page((unsigned long)topa);
+}
+
+/**
+ * topa_insert_table() - insert a ToPA table into a buffer
+ * @buf: PT buffer that's being extended.
+ * @topa: New topa table to be inserted.
+ *
+ * If it's the first table in this buffer, set up buffer's pointers
+ * accordingly; otherwise, add an END=1 link entry pointing to @topa in the
+ * current "last" table and adjust the last table pointer to @topa.
+ */
+static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
+{
+ struct topa *last = buf->last;
+
+ list_add_tail(&topa->list, &buf->tables);
+
+ if (!buf->first) {
+ buf->first = buf->last = buf->cur = topa;
+ return;
+ }
+
+ topa->offset = last->offset + last->size;
+ buf->last = topa;
+
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ return;
+
+ BUG_ON(last->last != TENTS_PER_PAGE - 1);
+
+ TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
+ TOPA_ENTRY(last, -1)->end = 1;
+}
+
+/**
+ * topa_table_full() - check if a ToPA table is filled up
+ * @topa: ToPA table.
+ */
+static bool topa_table_full(struct topa *topa)
+{
+ /* single-entry ToPA is a special case */
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ return !!topa->last;
+
+ return topa->last == TENTS_PER_PAGE - 1;
+}
+
+/**
+ * topa_insert_pages() - create a list of ToPA tables
+ * @buf: PT buffer being initialized.
+ * @gfp: Allocation flags.
+ *
+ * This initializes a list of ToPA tables with entries from
+ * the data_pages provided by rb_alloc_aux().
+ *
+ * Return: 0 on success or error code.
+ */
+static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
+{
+ struct topa *topa = buf->last;
+ int order = 0;
+ struct page *p;
+
+ p = virt_to_page(buf->data_pages[buf->nr_pages]);
+ if (PagePrivate(p))
+ order = page_private(p);
+
+ if (topa_table_full(topa)) {
+ topa = topa_alloc(buf->cpu, gfp);
+ if (!topa)
+ return -ENOMEM;
+
+ topa_insert_table(buf, topa);
+ }
+
+ TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
+ TOPA_ENTRY(topa, -1)->size = order;
+ if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ TOPA_ENTRY(topa, -1)->intr = 1;
+ TOPA_ENTRY(topa, -1)->stop = 1;
+ }
+
+ topa->last++;
+ topa->size += sizes(order);
+
+ buf->nr_pages += 1ul << order;
+
+ return 0;
+}
+
+/**
+ * pt_topa_dump() - print ToPA tables and their entries
+ * @buf: PT buffer.
+ */
+static void pt_topa_dump(struct pt_buffer *buf)
+{
+ struct topa *topa;
+
+ list_for_each_entry(topa, &buf->tables, list) {
+ int i;
+
+ pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
+ topa->phys, topa->offset, topa->size);
+ for (i = 0; i < TENTS_PER_PAGE; i++) {
+ pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
+ &topa->table[i],
+ (unsigned long)topa->table[i].base << TOPA_SHIFT,
+ sizes(topa->table[i].size),
+ topa->table[i].end ? 'E' : ' ',
+ topa->table[i].intr ? 'I' : ' ',
+ topa->table[i].stop ? 'S' : ' ',
+ *(u64 *)&topa->table[i]);
+ if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
+ topa->table[i].stop) ||
+ topa->table[i].end)
+ break;
+ }
+ }
+}
+
+/**
+ * pt_buffer_advance() - advance to the next output region
+ * @buf: PT buffer.
+ *
+ * Advance the current pointers in the buffer to the next ToPA entry.
+ */
+static void pt_buffer_advance(struct pt_buffer *buf)
+{
+ buf->output_off = 0;
+ buf->cur_idx++;
+
+ if (buf->cur_idx == buf->cur->last) {
+ if (buf->cur == buf->last)
+ buf->cur = buf->first;
+ else
+ buf->cur = list_entry(buf->cur->list.next, struct topa,
+ list);
+ buf->cur_idx = 0;
+ }
+}
+
+/**
+ * pt_update_head() - calculate current offsets and sizes
+ * @pt: Per-cpu pt context.
+ *
+ * Update buffer's current write pointer position and data size.
+ */
+static void pt_update_head(struct pt *pt)
+{
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
+ u64 topa_idx, base, old;
+
+ /* offset of the first region in this table from the beginning of buf */
+ base = buf->cur->offset + buf->output_off;
+
+ /* offset of the current output region within this table */
+ for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
+ base += sizes(buf->cur->table[topa_idx].size);
+
+ if (buf->snapshot) {
+ local_set(&buf->data_size, base);
+ } else {
+ old = (local64_xchg(&buf->head, base) &
+ ((buf->nr_pages << PAGE_SHIFT) - 1));
+ if (base < old)
+ base += buf->nr_pages << PAGE_SHIFT;
+
+ local_add(base - old, &buf->data_size);
+ }
+}
+
+/**
+ * pt_buffer_region() - obtain current output region's address
+ * @buf: PT buffer.
+ */
+static void *pt_buffer_region(struct pt_buffer *buf)
+{
+ return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
+}
+
+/**
+ * pt_buffer_region_size() - obtain current output region's size
+ * @buf: PT buffer.
+ */
+static size_t pt_buffer_region_size(struct pt_buffer *buf)
+{
+ return sizes(buf->cur->table[buf->cur_idx].size);
+}
+
+/**
+ * pt_handle_status() - take care of possible status conditions
+ * @pt: Per-cpu pt context.
+ */
+static void pt_handle_status(struct pt *pt)
+{
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
+ int advance = 0;
+ u64 status;
+
+ rdmsrl(MSR_IA32_RTIT_STATUS, status);
+
+ if (status & RTIT_STATUS_ERROR) {
+ pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
+ pt_topa_dump(buf);
+ status &= ~RTIT_STATUS_ERROR;
+ }
+
+ if (status & RTIT_STATUS_STOPPED) {
+ status &= ~RTIT_STATUS_STOPPED;
+
+ /*
+ * On systems that only do single-entry ToPA, hitting STOP
+ * means we are already losing data; need to let the decoder
+ * know.
+ */
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
+ buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
+ local_inc(&buf->lost);
+ advance++;
+ }
+ }
+
+ /*
+	 * Also, on single-entry ToPA implementations, the interrupt will come
+ * before the output reaches its output region's boundary.
+ */
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
+ pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
+ void *head = pt_buffer_region(buf);
+
+ /* everything within this margin needs to be zeroed out */
+ memset(head + buf->output_off, 0,
+ pt_buffer_region_size(buf) -
+ buf->output_off);
+ advance++;
+ }
+
+ if (advance)
+ pt_buffer_advance(buf);
+
+ wrmsrl(MSR_IA32_RTIT_STATUS, status);
+}
+
+/**
+ * pt_read_offset() - translate registers into buffer pointers
+ * @buf: PT buffer.
+ *
+ * Set buffer's output pointers from MSR values.
+ */
+static void pt_read_offset(struct pt_buffer *buf)
+{
+ u64 offset, base_topa;
+
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
+ buf->cur = phys_to_virt(base_topa);
+
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
+ /* offset within current output region */
+ buf->output_off = offset >> 32;
+ /* index of current output region within this table */
+ buf->cur_idx = (offset & 0xffffff80) >> 7;
+}
+
+/**
+ * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
+ * @buf: PT buffer.
+ * @pg: Page offset in the buffer.
+ *
+ * When advancing to the next output region (ToPA entry), given a page offset
+ * into the buffer, we need to find the offset of the first page in the next
+ * region.
+ */
+static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
+{
+ struct topa_entry *te = buf->topa_index[pg];
+
+ /* one region */
+ if (buf->first == buf->last && buf->first->last == 1)
+ return pg;
+
+ do {
+ pg++;
+ pg &= buf->nr_pages - 1;
+ } while (buf->topa_index[pg] == te);
+
+ return pg;
+}
+
+/**
+ * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
+ * @buf: PT buffer.
+ * @handle: Current output handle.
+ *
+ * Place INT and STOP marks to prevent overwriting old data that the consumer
+ * hasn't yet collected.
+ */
+static int pt_buffer_reset_markers(struct pt_buffer *buf,
+				   struct perf_output_handle *handle)
+{
+ unsigned long idx, npages, end;
+
+ if (buf->snapshot)
+ return 0;
+
+ /* can't stop in the middle of an output region */
+ if (buf->output_off + handle->size + 1 <
+ sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size))
+ return -EINVAL;
+
+ /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ return 0;
+
+ /* clear STOP and INT from current entry */
+ buf->topa_index[buf->stop_pos]->stop = 0;
+ buf->topa_index[buf->intr_pos]->intr = 0;
+
+ if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ npages = (handle->size + 1) >> PAGE_SHIFT;
+ end = (local64_read(&buf->head) >> PAGE_SHIFT) + npages;
+ /*if (end > handle->wakeup >> PAGE_SHIFT)
+ end = handle->wakeup >> PAGE_SHIFT;*/
+ idx = end & (buf->nr_pages - 1);
+ buf->stop_pos = idx;
+ idx = (local64_read(&buf->head) >> PAGE_SHIFT) + npages - 1;
+ idx &= buf->nr_pages - 1;
+ buf->intr_pos = idx;
+ }
+
+ buf->topa_index[buf->stop_pos]->stop = 1;
+ buf->topa_index[buf->intr_pos]->intr = 1;
+
+ return 0;
+}
+
+/**
+ * pt_buffer_setup_topa_index() - build topa_index[] table of regions
+ * @buf: PT buffer.
+ *
+ * topa_index[] references output regions indexed by offset into the
+ * buffer for purposes of quick reverse lookup.
+ */
+static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
+{
+ struct topa *cur = buf->first, *prev = buf->last;
+ struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
+ *te_prev = TOPA_ENTRY(prev, prev->last - 1);
+ int pg = 0, idx = 0, ntopa = 0;
+
+ while (pg < buf->nr_pages) {
+ int tidx;
+
+ /* pages within one topa entry */
+ for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
+ buf->topa_index[pg] = te_prev;
+
+ te_prev = te_cur;
+
+ if (idx == cur->last - 1) {
+ /* advance to next topa table */
+ idx = 0;
+ cur = list_entry(cur->list.next, struct topa, list);
+ ntopa++;
+ } else
+ idx++;
+ te_cur = TOPA_ENTRY(cur, idx);
+ }
+}
+
+/**
+ * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
+ * @buf: PT buffer.
+ * @head: Write pointer (aux_head) from AUX buffer.
+ *
+ * Find the ToPA table and entry corresponding to given @head and set buffer's
+ * "current" pointers accordingly.
+ */
+static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
+{
+ int pg;
+
+ if (buf->snapshot)
+ head &= (buf->nr_pages << PAGE_SHIFT) - 1;
+
+ pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
+ pg = pt_topa_next_entry(buf, pg);
+
+ buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
+ buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
+ (unsigned long)buf->cur) / sizeof(struct topa_entry);
+ buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);
+
+ local64_set(&buf->head, head);
+ local_set(&buf->data_size, 0);
+}
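
The pointer arithmetic above works because each ToPA table occupies exactly one page: masking an entry's address with PAGE_MASK recovers the table, and the remaining byte offset yields the entry index. The same trick in a userspace sketch with a stand-in entry type:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

struct entry { uint64_t word; };

int main(void)
{
	/* A page-aligned "table" of entries, like a ToPA table page. */
	struct entry *table = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	struct entry *e = &table[37];

	/* Mask recovers the containing page (the table); the leftover
	 * byte offset divided by the entry size recovers the index -
	 * the same move pt_buffer_reset_offsets() makes on topa_index[]. */
	struct entry *tbl = (struct entry *)((uintptr_t)e & PAGE_MASK);
	size_t idx = ((uintptr_t)e - (uintptr_t)tbl) / sizeof(*e);

	printf("recovered index: %zu\n", idx);	/* 37 */
	free(table);
	return 0;
}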
+
+/**
+ * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
+ * @buf: PT buffer.
+ */
+static void pt_buffer_fini_topa(struct pt_buffer *buf)
+{
+ struct topa *topa, *iter;
+
+ list_for_each_entry_safe(topa, iter, &buf->tables, list) {
+ /*
+ * right now, this is in free_aux() path only, so
+ * no need to unlink this table from the list
+ */
+ topa_free(topa);
+ }
+}
+
+/**
+ * pt_buffer_init_topa() - initialize ToPA table for pt buffer
+ * @buf: PT buffer.
+ * @nr_pages:	Number of pages in the buffer.
+ * @gfp: Allocation flags.
+ */
+static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
+ gfp_t gfp)
+{
+ struct topa *topa;
+ int err;
+
+ topa = topa_alloc(buf->cpu, gfp);
+ if (!topa)
+ return -ENOMEM;
+
+ topa_insert_table(buf, topa);
+
+ while (buf->nr_pages < nr_pages) {
+ err = topa_insert_pages(buf, gfp);
+ if (err) {
+ pt_buffer_fini_topa(buf);
+ return -ENOMEM;
+ }
+ }
+
+ pt_buffer_setup_topa_index(buf);
+
+ /* link last table to the first one, unless we're double buffering */
+ if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
+ TOPA_ENTRY(buf->last, -1)->end = 1;
+ }
+
+ pt_topa_dump(buf);
+ return 0;
+}
+
+/**
+ * pt_buffer_setup_aux() - set up topa tables for a PT buffer
+ * @cpu: Cpu on which to allocate, -1 means current.
+ * @pages: Array of pointers to buffer pages passed from perf core.
+ * @nr_pages: Number of pages in the buffer.
+ * @snapshot: If this is a snapshot/overwrite counter.
+ *
+ * This is a pmu::setup_aux callback that sets up ToPA tables and all the
+ * bookkeeping for an AUX buffer.
+ *
+ * Return: Our private PT buffer structure.
+ */
+static void *
+pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
+{
+ struct pt_buffer *buf;
+ int node, ret;
+
+ if (!nr_pages)
+ return NULL;
+
+ if (cpu == -1)
+ cpu = raw_smp_processor_id();
+ node = cpu_to_node(cpu);
+
+ buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
+ GFP_KERNEL, node);
+ if (!buf)
+ return NULL;
+
+ buf->cpu = cpu;
+ buf->snapshot = snapshot;
+ buf->data_pages = pages;
+
+ INIT_LIST_HEAD(&buf->tables);
+
+ ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
+ if (ret) {
+ kfree(buf);
+ return NULL;
+ }
+
+ return buf;
+}
+
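pt_buffer_setup_aux() sizes its allocation with offsetof(struct pt_buffer, topa_index[nr_pages]): the header and the nr_pages trailing array slots come from one allocation. The same flexible-array idiom in plain C (the struct here is a hypothetical stand-in):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pbuf {
        int nr_pages;
        void *topa_index[]; /* flexible array member */
    };

    static struct pbuf *pbuf_alloc(int nr_pages)
    {
        /* header plus nr_pages pointer slots, zeroed, in one allocation */
        struct pbuf *b = calloc(1, offsetof(struct pbuf, topa_index[nr_pages]));

        if (b)
            b->nr_pages = nr_pages;
        return b;
    }

    int main(void)
    {
        struct pbuf *b = pbuf_alloc(8);

        printf("%d\n", b ? b->nr_pages : -1);
        free(b);
        return 0;
    }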
+/**
+ * pt_buffer_free_aux() - perf AUX deallocation path callback
+ * @data: PT buffer.
+ */
+static void pt_buffer_free_aux(void *data)
+{
+ struct pt_buffer *buf = data;
+
+ pt_buffer_fini_topa(buf);
+ kfree(buf);
+}
+
+/**
+ * pt_buffer_is_full() - check if the buffer is full
+ * @buf: PT buffer.
+ * @pt: Per-cpu pt handle.
+ *
+ * If the user hasn't read data from the output region that aux_head
+ * points to, the buffer is considered full: the user needs to read at
+ * least this region and update aux_tail to point past it.
+ */
+static bool pt_buffer_is_full(struct pt_buffer *buf, struct pt *pt)
+{
+ if (buf->snapshot)
+ return false;
+
+ if (local_read(&buf->data_size) >= pt->handle.size)
+ return true;
+
+ return false;
+}
+
+/**
+ * intel_pt_interrupt() - PT PMI handler
+ */
+void intel_pt_interrupt(void)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct pt_buffer *buf;
+ struct perf_event *event = pt->handle.event;
+
+ /*
+ * There may be a dangling PT bit in the interrupt status register
+ * after PT has been disabled by pt_event_stop(). Make sure we don't
+ * do anything (particularly, re-enable) for this event here.
+ */
+ if (!ACCESS_ONCE(pt->handle_nmi))
+ return;
+
+ pt_config_start(false);
+
+ if (!event)
+ return;
+
+ buf = perf_get_aux(&pt->handle);
+ if (!buf)
+ return;
+
+ pt_read_offset(buf);
+
+ pt_handle_status(pt);
+
+ pt_update_head(pt);
+
+ perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
+ local_xchg(&buf->lost, 0));
+
+ if (!event->hw.state) {
+ int ret;
+
+ buf = perf_aux_output_begin(&pt->handle, event);
+ if (!buf) {
+ event->hw.state = PERF_HES_STOPPED;
+ return;
+ }
+
+ pt_buffer_reset_offsets(buf, pt->handle.head);
+ ret = pt_buffer_reset_markers(buf, &pt->handle);
+ if (ret) {
+ perf_aux_output_end(&pt->handle, 0, true);
+ return;
+ }
+
+ pt_config_buffer(buf->cur->table, buf->cur_idx,
+ buf->output_off);
+ wrmsrl(MSR_IA32_RTIT_STATUS, 0);
+ pt_config(event);
+ }
+}
+
+/*
+ * PMU callbacks
+ */
+
+static void pt_event_start(struct perf_event *event, int mode)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
+
+ if (pt_is_running() || !buf || pt_buffer_is_full(buf, pt)) {
+ event->hw.state = PERF_HES_STOPPED;
+ return;
+ }
+
+ ACCESS_ONCE(pt->handle_nmi) = 1;
+ event->hw.state = 0;
+
+ pt_config_buffer(buf->cur->table, buf->cur_idx,
+ buf->output_off);
+ wrmsrl(MSR_IA32_RTIT_STATUS, 0);
+ pt_config(event);
+}
+
+static void pt_event_stop(struct perf_event *event, int mode)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+
+ /*
+ * Protect against the PMI racing with disabling wrmsr,
+ * see comment in intel_pt_interrupt().
+ */
+ ACCESS_ONCE(pt->handle_nmi) = 0;
+ pt_config_start(false);
+
+ if (event->hw.state == PERF_HES_STOPPED)
+ return;
+
+ event->hw.state = PERF_HES_STOPPED;
+
+ if (mode & PERF_EF_UPDATE) {
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
+
+ if (!buf)
+ return;
+
+ if (WARN_ON_ONCE(pt->handle.event != event))
+ return;
+
+ pt_read_offset(buf);
+
+ pt_handle_status(pt);
+
+ pt_update_head(pt);
+ }
+}
+
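The handle_nmi ordering between pt_event_stop() and intel_pt_interrupt() is a small handshake: the stop path clears the flag before disabling the hardware, and the PMI handler tests the flag first, so a PMI that slips in after the stop does nothing and in particular never re-enables tracing. A userspace analogue, with C11 atomics standing in for the kernel's ACCESS_ONCE():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int handle_nmi;

    static void stop_side(void)
    {
        atomic_store(&handle_nmi, 0); /* 1) announce the stop... */
        /* 2) ...then disable the hardware, cf. pt_config_start(false) */
    }

    static void pmi_side(void)
    {
        if (!atomic_load(&handle_nmi))
            return; /* dangling PMI after a stop: do nothing */
        /* normal PMI processing would go here */
    }

    int main(void)
    {
        atomic_store(&handle_nmi, 1);
        stop_side();
        pmi_side(); /* returns early; nothing is re-enabled */
        puts("ok");
        return 0;
    }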
+static void pt_event_del(struct perf_event *event, int mode)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct pt_buffer *buf;
+
+ pt_event_stop(event, PERF_EF_UPDATE);
+
+ buf = perf_get_aux(&pt->handle);
+
+ if (buf) {
+ if (buf->snapshot)
+ pt->handle.head =
+ local_xchg(&buf->data_size,
+ buf->nr_pages << PAGE_SHIFT);
+ perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
+ local_xchg(&buf->lost, 0));
+ }
+}
+
+static int pt_event_add(struct perf_event *event, int mode)
+{
+ struct pt_buffer *buf;
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct hw_perf_event *hwc = &event->hw;
+ int ret = -EBUSY;
+
+ if (pt->handle.event)
+ goto out;
+
+ buf = perf_aux_output_begin(&pt->handle, event);
+ if (!buf) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pt_buffer_reset_offsets(buf, pt->handle.head);
+ if (!buf->snapshot) {
+ ret = pt_buffer_reset_markers(buf, &pt->handle);
+ if (ret) {
+ perf_aux_output_end(&pt->handle, 0, true);
+ goto out;
+ }
+ }
+
+ if (mode & PERF_EF_START) {
+ pt_event_start(event, 0);
+ if (hwc->state == PERF_HES_STOPPED) {
+ pt_event_del(event, 0);
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ hwc->state = PERF_HES_STOPPED;
+ }
+
+ ret = 0;
+out:
+ if (ret)
+ hwc->state = PERF_HES_STOPPED;
+
+ return ret;
+}
+
+static void pt_event_read(struct perf_event *event)
+{
+}
+
+static void pt_event_destroy(struct perf_event *event)
+{
+ x86_del_exclusive(x86_lbr_exclusive_pt);
+}
+
+static int pt_event_init(struct perf_event *event)
+{
+ if (event->attr.type != pt_pmu.pmu.type)
+ return -ENOENT;
+
+ if (!pt_event_valid(event))
+ return -EINVAL;
+
+ if (x86_add_exclusive(x86_lbr_exclusive_pt))
+ return -EBUSY;
+
+ event->destroy = pt_event_destroy;
+
+ return 0;
+}
+
+static __init int pt_init(void)
+{
+ int ret, cpu, prior_warn = 0;
+
+ BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ u64 ctl;
+
+ ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
+ if (!ret && (ctl & RTIT_CTL_TRACEEN))
+ prior_warn++;
+ }
+ put_online_cpus();
+
+ if (prior_warn) {
+ x86_add_exclusive(x86_lbr_exclusive_pt);
+ pr_warn("PT is enabled at boot time, doing nothing\n");
+
+ return -EBUSY;
+ }
+
+ ret = pt_pmu_hw_init();
+ if (ret)
+ return ret;
+
+ if (!pt_cap_get(PT_CAP_topa_output)) {
+ pr_warn("ToPA output is not supported on this CPU\n");
+ return -ENODEV;
+ }
+
+ if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ pt_pmu.pmu.capabilities =
+ PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;
+
+ pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
+ pt_pmu.pmu.attr_groups = pt_attr_groups;
+ pt_pmu.pmu.task_ctx_nr = perf_sw_context;
+ pt_pmu.pmu.event_init = pt_event_init;
+ pt_pmu.pmu.add = pt_event_add;
+ pt_pmu.pmu.del = pt_event_del;
+ pt_pmu.pmu.start = pt_event_start;
+ pt_pmu.pmu.stop = pt_event_stop;
+ pt_pmu.pmu.read = pt_event_read;
+ pt_pmu.pmu.setup_aux = pt_buffer_setup_aux;
+ pt_pmu.pmu.free_aux = pt_buffer_free_aux;
+ ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);
+
+ return ret;
+}
+
+module_init(pt_init);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index 21af6149e..12d9548 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -1132,8 +1132,7 @@ static int snbep_pci2phy_map_init(int devid)
}
}
- if (ubox_dev)
- pci_dev_put(ubox_dev);
+ pci_dev_put(ubox_dev);
return err ? pcibios_err_to_errno(err) : 0;
}
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 6063909..3d423a1 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -41,6 +41,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{ X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
{ X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
{ X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
+ { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
{ X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index aceb2f9..c76d3e3 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
#ifdef CONFIG_X86_32
struct pt_regs fixed_regs;
- if (!user_mode_vm(regs)) {
+ if (!user_mode(regs)) {
crash_fixup_ss_esp(&fixed_regs, regs);
regs = &fixed_regs;
}
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 3d35033..6367a78 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -286,13 +286,13 @@ static void __init x86_flattree_get_config(void)
initial_boot_params = dt = early_memremap(initial_dtb, map_len);
size = of_get_flat_dt_size();
if (map_len < size) {
- early_iounmap(dt, map_len);
+ early_memunmap(dt, map_len);
initial_boot_params = dt = early_memremap(initial_dtb, size);
map_len = size;
}
unflatten_and_copy_device_tree();
- early_iounmap(dt, map_len);
+ early_memunmap(dt, map_len);
}
#else
static inline void x86_flattree_get_config(void) { }
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index cf3df1d..9c30acf 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -25,10 +25,12 @@ unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;
-static void printk_stack_address(unsigned long address, int reliable)
+static void printk_stack_address(unsigned long address, int reliable,
+ void *data)
{
- pr_cont(" [<%p>] %s%pB\n",
- (void *)address, reliable ? "" : "? ", (void *)address);
+ printk("%s [<%p>] %s%pB\n",
+ (char *)data, (void *)address, reliable ? "" : "? ",
+ (void *)address);
}
void printk_address(unsigned long address)
@@ -155,8 +157,7 @@ static int print_trace_stack(void *data, char *name)
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
touch_nmi_watchdog();
- printk(data);
- printk_stack_address(addr, reliable);
+ printk_stack_address(addr, reliable, data);
}
static const struct stacktrace_ops print_trace_ops = {
@@ -278,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
print_modules();
show_regs(regs);
#ifdef CONFIG_X86_32
- if (user_mode_vm(regs)) {
+ if (user_mode(regs)) {
sp = regs->sp;
ss = regs->ss & 0xffff;
} else {
@@ -307,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
unsigned long flags = oops_begin();
int sig = SIGSEGV;
- if (!user_mode_vm(regs))
+ if (!user_mode(regs))
report_bug(regs->ip, regs);
if (__die(str, regs, err))
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 5abd4cd..464ffd6 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -108,9 +108,12 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
for (i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(stack))
break;
- if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- pr_cont("\n");
- pr_cont(" %08lx", *stack++);
+ if ((i % STACKSLOTS_PER_LINE) == 0) {
+ if (i != 0)
+ pr_cont("\n");
+ printk("%s %08lx", log_lvl, *stack++);
+ } else
+ pr_cont(" %08lx", *stack++);
touch_nmi_watchdog();
}
pr_cont("\n");
@@ -123,13 +126,13 @@ void show_regs(struct pt_regs *regs)
int i;
show_regs_print_info(KERN_EMERG);
- __show_regs(regs, !user_mode_vm(regs));
+ __show_regs(regs, !user_mode(regs));
/*
* When in-kernel, we also print out the stack and code at the
* time of the fault..
*/
- if (!user_mode_vm(regs)) {
+ if (!user_mode(regs)) {
unsigned int code_prologue = code_bytes * 43 / 64;
unsigned int code_len = code_bytes;
unsigned char c;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index ff86f19..5f1c626 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -280,12 +280,15 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
pr_cont(" <EOI> ");
}
} else {
- if (((long) stack & (THREAD_SIZE-1)) == 0)
+ if (kstack_end(stack))
break;
}
- if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- pr_cont("\n");
- pr_cont(" %016lx", *stack++);
+ if ((i % STACKSLOTS_PER_LINE) == 0) {
+ if (i != 0)
+ pr_cont("\n");
+ printk("%s %016lx", log_lvl, *stack++);
+ } else
+ pr_cont(" %016lx", *stack++);
touch_nmi_watchdog();
}
preempt_enable();
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 46201de..7d46bb2 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -661,7 +661,7 @@ void __init parse_e820_ext(u64 phys_addr, u32 data_len)
extmap = (struct e820entry *)(sdata->data);
__append_e820_map(extmap, entries);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- early_iounmap(sdata, data_len);
+ early_memunmap(sdata, data_len);
printk(KERN_INFO "e820: extended physical RAM map:\n");
e820_print_map("extended");
}
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index a62536a..49ff55e 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -95,20 +95,6 @@ static unsigned long early_serial_base = 0x3f8; /* ttyS0 */
#define DLL 0 /* Divisor Latch Low */
#define DLH 1 /* Divisor latch High */
-static void mem32_serial_out(unsigned long addr, int offset, int value)
-{
- uint32_t *vaddr = (uint32_t *)addr;
- /* shift implied by pointer type */
- writel(value, vaddr + offset);
-}
-
-static unsigned int mem32_serial_in(unsigned long addr, int offset)
-{
- uint32_t *vaddr = (uint32_t *)addr;
- /* shift implied by pointer type */
- return readl(vaddr + offset);
-}
-
static unsigned int io_serial_in(unsigned long addr, int offset)
{
return inb(addr + offset);
@@ -205,6 +191,20 @@ static __init void early_serial_init(char *s)
}
#ifdef CONFIG_PCI
+static void mem32_serial_out(unsigned long addr, int offset, int value)
+{
+ u32 *vaddr = (u32 *)addr;
+ /* shift implied by pointer type */
+ writel(value, vaddr + offset);
+}
+
+static unsigned int mem32_serial_in(unsigned long addr, int offset)
+{
+ u32 *vaddr = (u32 *)addr;
+ /* shift implied by pointer type */
+ return readl(vaddr + offset);
+}
+
/*
* early_pci_serial_init()
*
@@ -217,8 +217,8 @@ static __init void early_pci_serial_init(char *s)
unsigned divisor;
unsigned long baud = DEFAULT_BAUD;
u8 bus, slot, func;
- uint32_t classcode, bar0;
- uint16_t cmdreg;
+ u32 classcode, bar0;
+ u16 cmdreg;
char *e;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 31e2d5b..1c30976 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -395,10 +395,13 @@ sysenter_past_esp:
/*CFI_REL_OFFSET cs, 0*/
/*
* Push current_thread_info()->sysenter_return to the stack.
- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
- * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ * A tiny bit of offset fixup is necessary: TI_sysenter_return
+ * is relative to thread_info, which is at the bottom of the
+ * kernel stack page. 4*4 means the 4 words pushed above;
+ * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
+ * and THREAD_SIZE takes us to the bottom.
*/
- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
+ pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
CFI_REL_OFFSET eip, 0
pushl_cfi %eax
@@ -432,7 +435,7 @@ sysenter_after_call:
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx
- jne sysexit_audit
+ jnz sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
movl PT_EIP(%esp), %edx
@@ -460,7 +463,7 @@ sysenter_audit:
sysexit_audit:
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
- jne syscall_exit_work
+ jnz syscall_exit_work
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY)
movl %eax,%edx /* second arg, syscall return value */
@@ -472,7 +475,7 @@ sysexit_audit:
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
- jne syscall_exit_work
+ jnz syscall_exit_work
movl PT_EAX(%esp),%eax /* reload syscall return value */
jmp sysenter_exit
#endif
@@ -510,7 +513,7 @@ syscall_exit:
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx # current->work
- jne syscall_exit_work
+ jnz syscall_exit_work
restore_all:
TRACE_IRQS_IRET
@@ -612,7 +615,7 @@ work_notifysig: # deal with pending signals and
#ifdef CONFIG_VM86
testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
movl %esp, %eax
- jne work_notifysig_v86 # returning to kernel-space or
+ jnz work_notifysig_v86 # returning to kernel-space or
# vm86-space
1:
#else
@@ -720,43 +723,22 @@ END(sysenter_badsys)
.endm
/*
- * Build the entry stubs and pointer table with some assembler magic.
- * We pack 7 stubs into a single 32-byte chunk, which will fit in a
- * single cache line on all modern x86 implementations.
+ * Build the entry stubs with some assembler magic.
+ * We pack 1 stub into every 8-byte block.
*/
-.section .init.rodata,"a"
-ENTRY(interrupt)
-.section .entry.text, "ax"
- .p2align 5
- .p2align CONFIG_X86_L1_CACHE_SHIFT
+ .align 8
ENTRY(irq_entries_start)
RING0_INT_FRAME
-vector=FIRST_EXTERNAL_VECTOR
-.rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7
- .balign 32
- .rept 7
- .if vector < FIRST_SYSTEM_VECTOR
- .if vector <> FIRST_EXTERNAL_VECTOR
+ vector=FIRST_EXTERNAL_VECTOR
+ .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+ pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
+ vector=vector+1
+ jmp common_interrupt
CFI_ADJUST_CFA_OFFSET -4
- .endif
-1: pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
- .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
- jmp 2f
- .endif
- .previous
- .long 1b
- .section .entry.text, "ax"
-vector=vector+1
- .endif
- .endr
-2: jmp common_interrupt
-.endr
+ .align 8
+ .endr
END(irq_entries_start)
-.previous
-END(interrupt)
-.previous
-
/*
* the CPU automatically disables interrupts when executing an IRQ vector,
* so IRQ-flags tracing has to follow that:
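Why the stubs above push $(~vector + 0x80): for every vector in 0..255, that value lands in [-128, 127], so the push needs only a sign-extended byte immediate and each stub fits its 8-byte slot; common_interrupt later subtracts the 0x80 bias again to recover ~vector. The arithmetic, checked exhaustively in C:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        for (int vector = 0; vector < 256; vector++) {
            int enc = ~vector + 0x80;

            /* fits a sign-extended byte immediate */
            assert(enc >= -128 && enc <= 127);
            /* the handler's "add $-0x80" recovers ~vector */
            assert(enc - 0x80 == ~vector);
        }
        puts("all 256 vectors encode in a signed byte");
        return 0;
    }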
@@ -816,15 +798,9 @@ ENTRY(simd_coprocessor_error)
pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
-661: pushl_cfi $do_general_protection
-662:
-.section .altinstructions,"a"
- altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
-.previous
-.section .altinstr_replacement,"ax"
-663: pushl $do_simd_coprocessor_error
-664:
-.previous
+ ALTERNATIVE "pushl_cfi $do_general_protection", \
+ "pushl $do_simd_coprocessor_error", \
+ X86_FEATURE_XMM
#else
pushl_cfi $do_simd_coprocessor_error
#endif
@@ -1240,20 +1216,13 @@ error_code:
/*CFI_REL_OFFSET es, 0*/
pushl_cfi %ds
/*CFI_REL_OFFSET ds, 0*/
- pushl_cfi %eax
- CFI_REL_OFFSET eax, 0
- pushl_cfi %ebp
- CFI_REL_OFFSET ebp, 0
- pushl_cfi %edi
- CFI_REL_OFFSET edi, 0
- pushl_cfi %esi
- CFI_REL_OFFSET esi, 0
- pushl_cfi %edx
- CFI_REL_OFFSET edx, 0
- pushl_cfi %ecx
- CFI_REL_OFFSET ecx, 0
- pushl_cfi %ebx
- CFI_REL_OFFSET ebx, 0
+ pushl_cfi_reg eax
+ pushl_cfi_reg ebp
+ pushl_cfi_reg edi
+ pushl_cfi_reg esi
+ pushl_cfi_reg edx
+ pushl_cfi_reg ecx
+ pushl_cfi_reg ebx
cld
movl $(__KERNEL_PERCPU), %ecx
movl %ecx, %fs
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1d74d16..c7b2384 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -14,27 +14,14 @@
* NOTE: This code handles signal-recognition, which happens every time
* after an interrupt and after each system call.
*
- * Normal syscalls and interrupts don't save a full stack frame, this is
- * only done for syscall tracing, signals or fork/exec et.al.
- *
* A note on terminology:
- * - top of stack: Architecture defined interrupt frame from SS to RIP
+ * - iret frame: Architecture defined interrupt frame from SS to RIP
* at the top of the kernel process stack.
- * - partial stack frame: partially saved registers up to R11.
- * - full stack frame: Like partial stack frame, but all register saved.
*
* Some macro usage:
* - CFI macros are used to generate dwarf2 unwind information for better
* backtraces. They don't change any code.
- * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
- * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
- * There are unfortunately lots of special cases where some registers
- * not touched. The macro is a big mess that should be cleaned up.
- * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
- * Gives a full stack frame.
* - ENTRY/END Define functions in the symbol table.
- * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
- * frame that is otherwise undefined after a SYSCALL
* - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
* - idtentry - Define exception entry points.
*/
@@ -70,10 +57,6 @@
.section .entry.text, "ax"
-#ifndef CONFIG_PREEMPT
-#define retint_kernel retint_restore_args
-#endif
-
#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
swapgs
@@ -82,9 +65,9 @@ ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
-.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
+.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
+ bt $9,EFLAGS(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON
1:
@@ -116,8 +99,8 @@ ENDPROC(native_usergs_sysret64)
call debug_stack_reset
.endm
-.macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
+.macro TRACE_IRQS_IRETQ_DEBUG
+ bt $9,EFLAGS(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON_DEBUG
1:
@@ -130,34 +113,7 @@ ENDPROC(native_usergs_sysret64)
#endif
/*
- * C code is not supposed to know about undefined top of stack. Every time
- * a C function with an pt_regs argument is called from the SYSCALL based
- * fast path FIXUP_TOP_OF_STACK is needed.
- * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
- * manipulation.
- */
-
- /* %rsp:at FRAMEEND */
- .macro FIXUP_TOP_OF_STACK tmp offset=0
- movq PER_CPU_VAR(old_rsp),\tmp
- movq \tmp,RSP+\offset(%rsp)
- movq $__USER_DS,SS+\offset(%rsp)
- movq $__USER_CS,CS+\offset(%rsp)
- movq RIP+\offset(%rsp),\tmp /* get rip */
- movq \tmp,RCX+\offset(%rsp) /* copy it to rcx as sysret would do */
- movq R11+\offset(%rsp),\tmp /* get eflags */
- movq \tmp,EFLAGS+\offset(%rsp)
- .endm
-
- .macro RESTORE_TOP_OF_STACK tmp offset=0
- movq RSP+\offset(%rsp),\tmp
- movq \tmp,PER_CPU_VAR(old_rsp)
- movq EFLAGS+\offset(%rsp),\tmp
- movq \tmp,R11+\offset(%rsp)
- .endm
-
-/*
- * initial frame state for interrupts (and exceptions without error code)
+ * empty frame
*/
.macro EMPTY_FRAME start=1 offset=0
.if \start
@@ -173,12 +129,12 @@ ENDPROC(native_usergs_sysret64)
* initial frame state for interrupts (and exceptions without error code)
*/
.macro INTR_FRAME start=1 offset=0
- EMPTY_FRAME \start, SS+8+\offset-RIP
- /*CFI_REL_OFFSET ss, SS+\offset-RIP*/
- CFI_REL_OFFSET rsp, RSP+\offset-RIP
- /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
- /*CFI_REL_OFFSET cs, CS+\offset-RIP*/
- CFI_REL_OFFSET rip, RIP+\offset-RIP
+ EMPTY_FRAME \start, 5*8+\offset
+ /*CFI_REL_OFFSET ss, 4*8+\offset*/
+ CFI_REL_OFFSET rsp, 3*8+\offset
+ /*CFI_REL_OFFSET rflags, 2*8+\offset*/
+ /*CFI_REL_OFFSET cs, 1*8+\offset*/
+ CFI_REL_OFFSET rip, 0*8+\offset
.endm
/*
@@ -186,30 +142,23 @@ ENDPROC(native_usergs_sysret64)
* with vector already pushed)
*/
.macro XCPT_FRAME start=1 offset=0
- INTR_FRAME \start, RIP+\offset-ORIG_RAX
- .endm
-
-/*
- * frame that enables calling into C.
- */
- .macro PARTIAL_FRAME start=1 offset=0
- XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
- CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
- CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
- CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
- CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
- CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
- CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
- CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
- CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
- CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
+ INTR_FRAME \start, 1*8+\offset
.endm
/*
* frame that enables passing a complete pt_regs to a C function.
*/
.macro DEFAULT_FRAME start=1 offset=0
- PARTIAL_FRAME \start, R11+\offset-R15
+ XCPT_FRAME \start, ORIG_RAX+\offset
+ CFI_REL_OFFSET rdi, RDI+\offset
+ CFI_REL_OFFSET rsi, RSI+\offset
+ CFI_REL_OFFSET rdx, RDX+\offset
+ CFI_REL_OFFSET rcx, RCX+\offset
+ CFI_REL_OFFSET rax, RAX+\offset
+ CFI_REL_OFFSET r8, R8+\offset
+ CFI_REL_OFFSET r9, R9+\offset
+ CFI_REL_OFFSET r10, R10+\offset
+ CFI_REL_OFFSET r11, R11+\offset
CFI_REL_OFFSET rbx, RBX+\offset
CFI_REL_OFFSET rbp, RBP+\offset
CFI_REL_OFFSET r12, R12+\offset
@@ -218,105 +167,30 @@ ENDPROC(native_usergs_sysret64)
CFI_REL_OFFSET r15, R15+\offset
.endm
-ENTRY(save_paranoid)
- XCPT_FRAME 1 RDI+8
- cld
- movq %rdi, RDI+8(%rsp)
- movq %rsi, RSI+8(%rsp)
- movq_cfi rdx, RDX+8
- movq_cfi rcx, RCX+8
- movq_cfi rax, RAX+8
- movq %r8, R8+8(%rsp)
- movq %r9, R9+8(%rsp)
- movq %r10, R10+8(%rsp)
- movq %r11, R11+8(%rsp)
- movq_cfi rbx, RBX+8
- movq %rbp, RBP+8(%rsp)
- movq %r12, R12+8(%rsp)
- movq %r13, R13+8(%rsp)
- movq %r14, R14+8(%rsp)
- movq %r15, R15+8(%rsp)
- movl $1,%ebx
- movl $MSR_GS_BASE,%ecx
- rdmsr
- testl %edx,%edx
- js 1f /* negative -> in kernel */
- SWAPGS
- xorl %ebx,%ebx
-1: ret
- CFI_ENDPROC
-END(save_paranoid)
-
/*
- * A newly forked process directly context switches into this address.
+ * 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
*
- * rdi: prev task we switched from
- */
-ENTRY(ret_from_fork)
- DEFAULT_FRAME
-
- LOCK ; btr $TIF_FORK,TI_flags(%r8)
-
- pushq_cfi $0x0002
- popfq_cfi # reset kernel eflags
-
- call schedule_tail # rdi: 'prev' task parameter
-
- GET_THREAD_INFO(%rcx)
-
- RESTORE_REST
-
- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
- jz 1f
-
- /*
- * By the time we get here, we have no idea whether our pt_regs,
- * ti flags, and ti status came from the 64-bit SYSCALL fast path,
- * the slow path, or one of the ia32entry paths.
- * Use int_ret_from_sys_call to return, since it can safely handle
- * all of the above.
- */
- jmp int_ret_from_sys_call
-
-1:
- subq $REST_SKIP, %rsp # leave space for volatiles
- CFI_ADJUST_CFA_OFFSET REST_SKIP
- movq %rbp, %rdi
- call *%rbx
- movl $0, RAX(%rsp)
- RESTORE_REST
- jmp int_ret_from_sys_call
- CFI_ENDPROC
-END(ret_from_fork)
-
-/*
- * System call entry. Up to 6 arguments in registers are supported.
+ * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
+ * then loads new ss, cs, and rip from previously programmed MSRs.
+ * rflags gets masked by a value from another MSR (so CLD and CLAC
+ * are not needed). SYSCALL does not save anything on the stack
+ * and does not change rsp.
*
- * SYSCALL does not save anything on the stack and does not change the
- * stack pointer. However, it does mask the flags register for us, so
- * CLD and CLAC are not needed.
- */
-
-/*
- * Register setup:
+ * Registers on entry:
* rax system call number
+ * rcx return address
+ * r11 saved rflags (note: r11 is callee-clobbered register in C ABI)
* rdi arg0
- * rcx return address for syscall/sysret, C arg3
* rsi arg1
* rdx arg2
- * r10 arg3 (--> moved to rcx for C)
+ * r10 arg3 (needs to be moved to rcx to conform to C ABI)
* r8 arg4
* r9 arg5
- * r11 eflags for syscall/sysret, temporary for C
- * r12-r15,rbp,rbx saved by C code, not touched.
+ * (note: r12-r15,rbp,rbx are callee-preserved in C ABI)
*
- * Interrupts are off on entry.
* Only called from user space.
*
- * XXX if we had a free scratch register we could save the RSP into the stack frame
- * and report it properly in ps. Unfortunately we haven't.
- *
- * When user can change the frames always force IRET. That is because
+ * When user can change pt_regs->foo always force IRET. That is because
* it deals with uncanonical addresses better. SYSRET has trouble
* with them due to bugs in both AMD and Intel CPUs.
*/
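The convention documented above is exactly what a userspace caller emits; a minimal raw-syscall sketch for x86-64 Linux (using __NR_write == 1, and only three of the six argument registers):

    /* rax = nr; rdi/rsi/rdx = args; rcx and r11 are clobbered because
     * SYSCALL/SYSRET use them for the return address and rflags. */
    static long raw_write(int fd, const void *buf, unsigned long len)
    {
        long ret;

        asm volatile ("syscall"
                      : "=a" (ret)
                      : "a" (1L), "D" ((long)fd), "S" (buf), "d" (len)
                      : "rcx", "r11", "memory");
        return ret;
    }

    int main(void)
    {
        raw_write(1, "hello\n", 6);
        return 0;
    }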
@@ -324,9 +198,15 @@ END(ret_from_fork)
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
+ CFI_DEF_CFA rsp,0
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
+
+ /*
+ * Interrupts are off on entry.
+ * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+ * it is too small to ever cause noticeable irq latency.
+ */
SWAPGS_UNSAFE_STACK
/*
* A hypervisor implementation might want to use a label
@@ -335,18 +215,38 @@ ENTRY(system_call)
*/
GLOBAL(system_call_after_swapgs)
- movq %rsp,PER_CPU_VAR(old_rsp)
+ movq %rsp,PER_CPU_VAR(rsp_scratch)
movq PER_CPU_VAR(kernel_stack),%rsp
+
+ /* Construct struct pt_regs on stack */
+ pushq_cfi $__USER_DS /* pt_regs->ss */
+ pushq_cfi PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
/*
- * No need to follow this irqs off/on section - it's straight
- * and short:
+ * Re-enable interrupts.
+ * We use 'rsp_scratch' as a scratch space, hence the irq-off block above
+ * must execute atomically in the face of possible interrupt-driven
+ * task preemption. We must enable interrupts only after we're done
+ * with using rsp_scratch:
*/
ENABLE_INTERRUPTS(CLBR_NONE)
- SAVE_ARGS 8, 0, rax_enosys=1
- movq_cfi rax,(ORIG_RAX-ARGOFFSET)
- movq %rcx,RIP-ARGOFFSET(%rsp)
- CFI_REL_OFFSET rip,RIP-ARGOFFSET
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ pushq_cfi %r11 /* pt_regs->flags */
+ pushq_cfi $__USER_CS /* pt_regs->cs */
+ pushq_cfi %rcx /* pt_regs->ip */
+ CFI_REL_OFFSET rip,0
+ pushq_cfi_reg rax /* pt_regs->orig_ax */
+ pushq_cfi_reg rdi /* pt_regs->di */
+ pushq_cfi_reg rsi /* pt_regs->si */
+ pushq_cfi_reg rdx /* pt_regs->dx */
+ pushq_cfi_reg rcx /* pt_regs->cx */
+ pushq_cfi $-ENOSYS /* pt_regs->ax */
+ pushq_cfi_reg r8 /* pt_regs->r8 */
+ pushq_cfi_reg r9 /* pt_regs->r9 */
+ pushq_cfi_reg r10 /* pt_regs->r10 */
+ pushq_cfi_reg r11 /* pt_regs->r11 */
+ sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
+ CFI_ADJUST_CFA_OFFSET 6*8
+
+ testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz tracesys
system_call_fastpath:
#if __SYSCALL_MASK == ~0
@@ -355,82 +255,96 @@ system_call_fastpath:
andl $__SYSCALL_MASK,%eax
cmpl $__NR_syscall_max,%eax
#endif
- ja ret_from_sys_call /* and return regs->ax */
+ ja 1f /* return -ENOSYS (already in pt_regs->ax) */
movq %r10,%rcx
- call *sys_call_table(,%rax,8) # XXX: rip relative
- movq %rax,RAX-ARGOFFSET(%rsp)
+ call *sys_call_table(,%rax,8)
+ movq %rax,RAX(%rsp)
+1:
/*
- * Syscall return path ending with SYSRET (fast path)
- * Has incomplete stack frame and undefined top of stack.
+ * Syscall return path ending with SYSRET (fast path).
+ * Has incompletely filled pt_regs.
*/
-ret_from_sys_call:
- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
- jnz int_ret_from_sys_call_fixup /* Go the the slow path */
-
LOCKDEP_SYS_EXIT
+ /*
+ * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+ * it is too small to ever cause noticeable irq latency.
+ */
DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
- CFI_REMEMBER_STATE
+
/*
- * sysretq will re-enable interrupts:
+ * We must check ti flags with interrupts (or at least preemption)
+ * off because we must *never* return to userspace without
+ * processing exit work that is enqueued if we're preempted here.
+ * In particular, returning to userspace with any of the one-shot
+ * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+ * very bad.
*/
- TRACE_IRQS_ON
- movq RIP-ARGOFFSET(%rsp),%rcx
+ testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
+
+ CFI_REMEMBER_STATE
+
+ RESTORE_C_REGS_EXCEPT_RCX_R11
+ movq RIP(%rsp),%rcx
CFI_REGISTER rip,rcx
- RESTORE_ARGS 1,-ARG_SKIP,0
+ movq EFLAGS(%rsp),%r11
/*CFI_REGISTER rflags,r11*/
- movq PER_CPU_VAR(old_rsp), %rsp
+ movq RSP(%rsp),%rsp
+ /*
+ * 64bit SYSRET restores rip from rcx,
+ * rflags from r11 (but RF and VM bits are forced to 0),
+ * cs and ss are loaded from MSRs.
+ * Restoration of rflags re-enables interrupts.
+ */
USERGS_SYSRET64
CFI_RESTORE_STATE
-int_ret_from_sys_call_fixup:
- FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
- jmp int_ret_from_sys_call
-
- /* Do syscall tracing */
+ /* Do syscall entry tracing */
tracesys:
- leaq -REST_SKIP(%rsp), %rdi
- movq $AUDIT_ARCH_X86_64, %rsi
+ movq %rsp, %rdi
+ movl $AUDIT_ARCH_X86_64, %esi
call syscall_trace_enter_phase1
test %rax, %rax
jnz tracesys_phase2 /* if needed, run the slow path */
- LOAD_ARGS 0 /* else restore clobbered regs */
+ RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */
+ movq ORIG_RAX(%rsp), %rax
jmp system_call_fastpath /* and return to the fast path */
tracesys_phase2:
- SAVE_REST
- FIXUP_TOP_OF_STACK %rdi
+ SAVE_EXTRA_REGS
movq %rsp, %rdi
- movq $AUDIT_ARCH_X86_64, %rsi
+ movl $AUDIT_ARCH_X86_64, %esi
movq %rax,%rdx
call syscall_trace_enter_phase2
/*
- * Reload arg registers from stack in case ptrace changed them.
+ * Reload registers from stack in case ptrace changed them.
 * We don't reload %rax because syscall_trace_enter_phase2() returned
* the value it wants us to use in the table lookup.
*/
- LOAD_ARGS ARGOFFSET, 1
- RESTORE_REST
+ RESTORE_C_REGS_EXCEPT_RAX
+ RESTORE_EXTRA_REGS
#if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max,%rax
#else
andl $__SYSCALL_MASK,%eax
cmpl $__NR_syscall_max,%eax
#endif
- ja int_ret_from_sys_call /* RAX(%rsp) is already set */
+ ja 1f /* return -ENOSYS (already in pt_regs->ax) */
movq %r10,%rcx /* fixup for C */
call *sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
- /* Use IRET because user could have changed frame */
+ movq %rax,RAX(%rsp)
+1:
+ /* Use IRET because user could have changed pt_regs->foo */
/*
* Syscall return path ending with IRET.
- * Has correct top of stack, but partial stack frame.
+ * Has correct iret frame.
*/
GLOBAL(int_ret_from_sys_call)
DISABLE_INTERRUPTS(CLBR_NONE)
+int_ret_from_sys_call_irqs_off: /* jumps come here from the irqs-off SYSRET path */
TRACE_IRQS_OFF
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
@@ -440,8 +354,8 @@ GLOBAL(int_with_check)
movl TI_flags(%rcx),%edx
andl %edi,%edx
jnz int_careful
- andl $~TS_COMPAT,TI_status(%rcx)
- jmp retint_swapgs
+ andl $~TS_COMPAT,TI_status(%rcx)
+ jmp syscall_return
/* Either reschedule or signal or syscall exit tracking needed. */
/* First do a reschedule test. */
@@ -458,12 +372,11 @@ int_careful:
TRACE_IRQS_OFF
jmp int_with_check
- /* handle signals and tracing -- both require a full stack frame */
+ /* handle signals and tracing -- both require a full pt_regs */
int_very_careful:
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
-int_check_syscall_exit_work:
- SAVE_REST
+ SAVE_EXTRA_REGS
/* Check for syscall exit trace */
testl $_TIF_WORK_SYSCALL_EXIT,%edx
jz int_signal
@@ -482,86 +395,192 @@ int_signal:
call do_notify_resume
1: movl $_TIF_WORK_MASK,%edi
int_restore_rest:
- RESTORE_REST
+ RESTORE_EXTRA_REGS
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
jmp int_with_check
+
+syscall_return:
+ /* The IRETQ could re-enable interrupts: */
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ TRACE_IRQS_IRETQ
+
+ /*
+ * Try to use SYSRET instead of IRET if we're returning to
+ * a completely clean 64-bit userspace context.
+ */
+ movq RCX(%rsp),%rcx
+ cmpq %rcx,RIP(%rsp) /* RCX == RIP */
+ jne opportunistic_sysret_failed
+
+ /*
+ * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
+ * in kernel space. This essentially lets the user take over
+ * the kernel, since userspace controls RSP. It's not worth
+ * testing for canonicalness exactly -- this check detects any
+ * of the 17 high bits set, which is true for non-canonical
+ * or kernel addresses. (This will pessimize vsyscall=native.
+ * Big deal.)
+ *
+ * If virtual addresses ever become wider, this will need
+ * to be updated to remain correct on both old and new CPUs.
+ */
+ .ifne __VIRTUAL_MASK_SHIFT - 47
+ .error "virtual address width changed -- SYSRET checks need update"
+ .endif
+ shr $__VIRTUAL_MASK_SHIFT, %rcx
+ jnz opportunistic_sysret_failed
+
+ cmpq $__USER_CS,CS(%rsp) /* CS must match SYSRET */
+ jne opportunistic_sysret_failed
+
+ movq R11(%rsp),%r11
+ cmpq %r11,EFLAGS(%rsp) /* R11 == RFLAGS */
+ jne opportunistic_sysret_failed
+
+ /*
+ * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET,
+ * restoring TF results in a trap from userspace immediately after
+ * SYSRET. This would cause an infinite loop whenever #DB happens
+ * with register state that satisfies the opportunistic SYSRET
+ * conditions. For example, single-stepping this user code:
+ *
+ * movq $stuck_here,%rcx
+ * pushfq
+ * popq %r11
+ * stuck_here:
+ *
+ * would never get past 'stuck_here'.
+ */
+ testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
+ jnz opportunistic_sysret_failed
+
+ /* nothing to check for RSP */
+
+ cmpq $__USER_DS,SS(%rsp) /* SS must match SYSRET */
+ jne opportunistic_sysret_failed
+
+ /*
+ * We win! This label is here just for ease of understanding
+ * perf profiles. Nothing jumps here.
+ */
+syscall_return_via_sysret:
+ CFI_REMEMBER_STATE
+ /* r11 is already restored (see code above) */
+ RESTORE_C_REGS_EXCEPT_R11
+ movq RSP(%rsp),%rsp
+ USERGS_SYSRET64
+ CFI_RESTORE_STATE
+
+opportunistic_sysret_failed:
+ SWAPGS
+ jmp restore_c_regs_and_iret
CFI_ENDPROC
END(system_call)
+
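The shift-based screen above is deliberately not an exact canonicalness test: with 48-bit virtual addresses, a nonzero result after shifting right by __VIRTUAL_MASK_SHIFT means at least one of the 17 high bits is set, which covers every non-canonical address and every kernel address. The same predicate in C, under the same 48-bit assumption:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define VIRT_SHIFT 47 /* mirrors __VIRTUAL_MASK_SHIFT for 48-bit VA */

    /* True if any of bits 47..63 are set: non-canonical or kernel address. */
    static bool sysret_rip_suspect(uint64_t rip)
    {
        return (rip >> VIRT_SHIFT) != 0;
    }

    int main(void)
    {
        assert(!sysret_rip_suspect(0x00007fffffffe000ULL)); /* user: fine */
        assert(sysret_rip_suspect(0xffff880000000000ULL));  /* kernel */
        assert(sysret_rip_suspect(0x0000800000000000ULL));  /* non-canonical */
        return 0;
    }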
.macro FORK_LIKE func
ENTRY(stub_\func)
CFI_STARTPROC
- popq %r11 /* save return address */
- PARTIAL_FRAME 0
- SAVE_REST
- pushq %r11 /* put it back on stack */
- FIXUP_TOP_OF_STACK %r11, 8
- DEFAULT_FRAME 0 8 /* offset 8: return address */
- call sys_\func
- RESTORE_TOP_OF_STACK %r11, 8
- ret $REST_SKIP /* pop extended registers */
+ DEFAULT_FRAME 0, 8 /* offset 8: return address */
+ SAVE_EXTRA_REGS 8
+ jmp sys_\func
CFI_ENDPROC
END(stub_\func)
.endm
- .macro FIXED_FRAME label,func
-ENTRY(\label)
- CFI_STARTPROC
- PARTIAL_FRAME 0 8 /* offset 8: return address */
- FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
- call \func
- RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
- ret
- CFI_ENDPROC
-END(\label)
- .endm
-
FORK_LIKE clone
FORK_LIKE fork
FORK_LIKE vfork
- FIXED_FRAME stub_iopl, sys_iopl
ENTRY(stub_execve)
CFI_STARTPROC
- addq $8, %rsp
- PARTIAL_FRAME 0
- SAVE_REST
- FIXUP_TOP_OF_STACK %r11
- call sys_execve
- movq %rax,RAX(%rsp)
- RESTORE_REST
- jmp int_ret_from_sys_call
+ DEFAULT_FRAME 0, 8
+ call sys_execve
+return_from_execve:
+ testl %eax, %eax
+ jz 1f
+ /* exec failed, can use fast SYSRET code path in this case */
+ ret
+1:
+ /* must use IRET code path (pt_regs->cs may have changed) */
+ addq $8, %rsp
+ CFI_ADJUST_CFA_OFFSET -8
+ ZERO_EXTRA_REGS
+ movq %rax,RAX(%rsp)
+ jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_execve)
-
-ENTRY(stub_execveat)
+/*
+ * Remaining execve stubs are only 7 bytes long.
+ * ENTRY() often aligns to 16 bytes, which in this case has no benefit.
+ */
+ .align 8
+GLOBAL(stub_execveat)
CFI_STARTPROC
- addq $8, %rsp
- PARTIAL_FRAME 0
- SAVE_REST
- FIXUP_TOP_OF_STACK %r11
- call sys_execveat
- RESTORE_TOP_OF_STACK %r11
- movq %rax,RAX(%rsp)
- RESTORE_REST
- jmp int_ret_from_sys_call
+ DEFAULT_FRAME 0, 8
+ call sys_execveat
+ jmp return_from_execve
CFI_ENDPROC
END(stub_execveat)
+#ifdef CONFIG_X86_X32_ABI
+ .align 8
+GLOBAL(stub_x32_execve)
+ CFI_STARTPROC
+ DEFAULT_FRAME 0, 8
+ call compat_sys_execve
+ jmp return_from_execve
+ CFI_ENDPROC
+END(stub_x32_execve)
+ .align 8
+GLOBAL(stub_x32_execveat)
+ CFI_STARTPROC
+ DEFAULT_FRAME 0, 8
+ call compat_sys_execveat
+ jmp return_from_execve
+ CFI_ENDPROC
+END(stub_x32_execveat)
+#endif
+
+#ifdef CONFIG_IA32_EMULATION
+ .align 8
+GLOBAL(stub32_execve)
+ CFI_STARTPROC
+ call compat_sys_execve
+ jmp return_from_execve
+ CFI_ENDPROC
+END(stub32_execve)
+ .align 8
+GLOBAL(stub32_execveat)
+ CFI_STARTPROC
+ call compat_sys_execveat
+ jmp return_from_execve
+ CFI_ENDPROC
+END(stub32_execveat)
+#endif
+
/*
* sigreturn is special because it needs to restore all registers on return.
* This cannot be done with SYSRET, so use the IRET return path instead.
*/
ENTRY(stub_rt_sigreturn)
CFI_STARTPROC
- addq $8, %rsp
- PARTIAL_FRAME 0
- SAVE_REST
- FIXUP_TOP_OF_STACK %r11
+ DEFAULT_FRAME 0, 8
+ /*
+ * SAVE_EXTRA_REGS result is not normally needed:
+ * sigreturn overwrites all pt_regs->GPREGS.
+ * But sigreturn can fail (!), and there is no easy way to detect that.
+ * To make sure RESTORE_EXTRA_REGS doesn't restore garbage on error,
+ * we SAVE_EXTRA_REGS here.
+ */
+ SAVE_EXTRA_REGS 8
call sys_rt_sigreturn
- movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
- RESTORE_REST
+return_from_stub:
+ addq $8, %rsp
+ CFI_ADJUST_CFA_OFFSET -8
+ RESTORE_EXTRA_REGS
+ movq %rax,RAX(%rsp)
jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_rt_sigreturn)
@@ -569,86 +588,70 @@ END(stub_rt_sigreturn)
#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
CFI_STARTPROC
- addq $8, %rsp
- PARTIAL_FRAME 0
- SAVE_REST
- FIXUP_TOP_OF_STACK %r11
+ DEFAULT_FRAME 0, 8
+ SAVE_EXTRA_REGS 8
call sys32_x32_rt_sigreturn
- movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
- RESTORE_REST
- jmp int_ret_from_sys_call
+ jmp return_from_stub
CFI_ENDPROC
END(stub_x32_rt_sigreturn)
+#endif
-ENTRY(stub_x32_execve)
- CFI_STARTPROC
- addq $8, %rsp
- PARTIAL_FRAME 0
- SAVE_REST
- FIXUP_TOP_OF_STACK %r11
- call compat_sys_execve
- RESTORE_TOP_OF_STACK %r11
- movq %rax,RAX(%rsp)
- RESTORE_REST
- jmp int_ret_from_sys_call
- CFI_ENDPROC
-END(stub_x32_execve)
+/*
+ * A newly forked process directly context switches into this address.
+ *
+ * rdi: prev task we switched from
+ */
+ENTRY(ret_from_fork)
+ DEFAULT_FRAME
-ENTRY(stub_x32_execveat)
- CFI_STARTPROC
- addq $8, %rsp
- PARTIAL_FRAME 0
- SAVE_REST
- FIXUP_TOP_OF_STACK %r11
- call compat_sys_execveat
- RESTORE_TOP_OF_STACK %r11
- movq %rax,RAX(%rsp)
- RESTORE_REST
+ LOCK ; btr $TIF_FORK,TI_flags(%r8)
+
+ pushq_cfi $0x0002
+ popfq_cfi # reset kernel eflags
+
+ call schedule_tail # rdi: 'prev' task parameter
+
+ RESTORE_EXTRA_REGS
+
+ testl $3,CS(%rsp) # from kernel_thread?
+
+ /*
+ * By the time we get here, we have no idea whether our pt_regs,
+ * ti flags, and ti status came from the 64-bit SYSCALL fast path,
+ * the slow path, or one of the ia32entry paths.
+ * Use IRET code path to return, since it can safely handle
+ * all of the above.
+ */
+ jnz int_ret_from_sys_call
+
+ /* We came from kernel_thread */
+ /* nb: we depend on RESTORE_EXTRA_REGS above */
+ movq %rbp, %rdi
+ call *%rbx
+ movl $0, RAX(%rsp)
+ RESTORE_EXTRA_REGS
jmp int_ret_from_sys_call
CFI_ENDPROC
-END(stub_x32_execveat)
-
-#endif
+END(ret_from_fork)
/*
- * Build the entry stubs and pointer table with some assembler magic.
- * We pack 7 stubs into a single 32-byte chunk, which will fit in a
- * single cache line on all modern x86 implementations.
+ * Build the entry stubs with some assembler magic.
+ * We pack 1 stub into every 8-byte block.
*/
- .section .init.rodata,"a"
-ENTRY(interrupt)
- .section .entry.text
- .p2align 5
- .p2align CONFIG_X86_L1_CACHE_SHIFT
+ .align 8
ENTRY(irq_entries_start)
INTR_FRAME
-vector=FIRST_EXTERNAL_VECTOR
-.rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7
- .balign 32
- .rept 7
- .if vector < FIRST_SYSTEM_VECTOR
- .if vector <> FIRST_EXTERNAL_VECTOR
+ vector=FIRST_EXTERNAL_VECTOR
+ .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+ pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */
+ vector=vector+1
+ jmp common_interrupt
CFI_ADJUST_CFA_OFFSET -8
- .endif
-1: pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */
- .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
- jmp 2f
- .endif
- .previous
- .quad 1b
- .section .entry.text
-vector=vector+1
- .endif
- .endr
-2: jmp common_interrupt
-.endr
+ .align 8
+ .endr
CFI_ENDPROC
END(irq_entries_start)
-.previous
-END(interrupt)
-.previous
-
/*
* Interrupt entry/exit.
*
@@ -659,47 +662,45 @@ END(interrupt)
/* 0(%rsp): ~(interrupt number) */
.macro interrupt func
- /* reserve pt_regs for scratch regs and rbp */
- subq $ORIG_RAX-RBP, %rsp
- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
cld
- /* start from rbp in pt_regs and jump over */
- movq_cfi rdi, (RDI-RBP)
- movq_cfi rsi, (RSI-RBP)
- movq_cfi rdx, (RDX-RBP)
- movq_cfi rcx, (RCX-RBP)
- movq_cfi rax, (RAX-RBP)
- movq_cfi r8, (R8-RBP)
- movq_cfi r9, (R9-RBP)
- movq_cfi r10, (R10-RBP)
- movq_cfi r11, (R11-RBP)
-
- /* Save rbp so that we can unwind from get_irq_regs() */
- movq_cfi rbp, 0
-
- /* Save previous stack value */
- movq %rsp, %rsi
+ /*
+ * Since nothing in interrupt handling code touches r12...r15 members
+ * of "struct pt_regs", and since interrupts can nest, we can save
+ * four stack slots and simultaneously provide
+ * an unwind-friendly stack layout by saving "truncated" pt_regs
+ * exactly up to the rbp slot, without these members.
+ */
+ ALLOC_PT_GPREGS_ON_STACK -RBP
+ SAVE_C_REGS -RBP
+ /* this goes to 0(%rsp) for unwinder, not for saving the value: */
+ SAVE_EXTRA_REGS_RBP -RBP
- leaq -RBP(%rsp),%rdi /* arg1 for handler */
- testl $3, CS-RBP(%rsi)
+ leaq -RBP(%rsp),%rdi /* arg1 for \func (pointer to pt_regs) */
+
+ testl $3, CS-RBP(%rsp)
je 1f
SWAPGS
+1:
/*
+ * Save previous stack pointer, optionally switch to interrupt stack.
* irq_count is used to check if a CPU is already on an interrupt stack
* or not. While this is essentially redundant with preempt_count it is
* a little cheaper to use a separate counter in the PDA (short of
* moving irq_enter into assembly, which would be too much work)
*/
-1: incl PER_CPU_VAR(irq_count)
+ movq %rsp, %rsi
+ incl PER_CPU_VAR(irq_count)
cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
CFI_DEF_CFA_REGISTER rsi
-
- /* Store previous stack value */
pushq %rsi
+ /*
+ * For debugger:
+ * "CFA (Current Frame Address) is the value on stack + offset"
+ */
CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
- 0x77 /* DW_OP_breg7 */, 0, \
+ 0x77 /* DW_OP_breg7 (rsp) */, 0, \
0x06 /* DW_OP_deref */, \
- 0x08 /* DW_OP_const1u */, SS+8-RBP, \
+ 0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
0x22 /* DW_OP_plus */
/* We entered an interrupt context - irqs are off: */
TRACE_IRQS_OFF
@@ -717,7 +718,7 @@ common_interrupt:
ASM_CLAC
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
interrupt do_IRQ
- /* 0(%rsp): old_rsp-ARGOFFSET */
+ /* 0(%rsp): old RSP */
ret_from_intr:
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -725,19 +726,18 @@ ret_from_intr:
/* Restore saved previous stack */
popq %rsi
- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
- leaq ARGOFFSET-RBP(%rsi), %rsp
+ CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
+ /* return code expects complete pt_regs - adjust rsp accordingly: */
+ leaq -RBP(%rsi),%rsp
CFI_DEF_CFA_REGISTER rsp
- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
+ CFI_ADJUST_CFA_OFFSET RBP
-exit_intr:
- GET_THREAD_INFO(%rcx)
- testl $3,CS-ARGOFFSET(%rsp)
+ testl $3,CS(%rsp)
je retint_kernel
-
/* Interrupt came from user space */
+
+ GET_THREAD_INFO(%rcx)
/*
- * Has a correct top of stack, but a partial stack frame
* %rcx: thread info. Interrupts off.
*/
retint_with_reschedule:
@@ -756,70 +756,34 @@ retint_swapgs: /* return to user-space */
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_IRETQ
- /*
- * Try to use SYSRET instead of IRET if we're returning to
- * a completely clean 64-bit userspace context.
- */
- movq (RCX-R11)(%rsp), %rcx
- cmpq %rcx,(RIP-R11)(%rsp) /* RCX == RIP */
- jne opportunistic_sysret_failed
-
- /*
- * On Intel CPUs, sysret with non-canonical RCX/RIP will #GP
- * in kernel space. This essentially lets the user take over
- * the kernel, since userspace controls RSP. It's not worth
- * testing for canonicalness exactly -- this check detects any
- * of the 17 high bits set, which is true for non-canonical
- * or kernel addresses. (This will pessimize vsyscall=native.
- * Big deal.)
- *
- * If virtual addresses ever become wider, this will need
- * to be updated to remain correct on both old and new CPUs.
- */
- .ifne __VIRTUAL_MASK_SHIFT - 47
- .error "virtual address width changed -- sysret checks need update"
- .endif
- shr $__VIRTUAL_MASK_SHIFT, %rcx
- jnz opportunistic_sysret_failed
-
- cmpq $__USER_CS,(CS-R11)(%rsp) /* CS must match SYSRET */
- jne opportunistic_sysret_failed
-
- movq (R11-ARGOFFSET)(%rsp), %r11
- cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */
- jne opportunistic_sysret_failed
-
- testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */
- jnz opportunistic_sysret_failed
-
- /* nothing to check for RSP */
-
- cmpq $__USER_DS,(SS-ARGOFFSET)(%rsp) /* SS must match SYSRET */
- jne opportunistic_sysret_failed
-
- /*
- * We win! This label is here just for ease of understanding
- * perf profiles. Nothing jumps here.
- */
-irq_return_via_sysret:
- CFI_REMEMBER_STATE
- RESTORE_ARGS 1,8,1
- movq (RSP-RIP)(%rsp),%rsp
- USERGS_SYSRET64
- CFI_RESTORE_STATE
-
-opportunistic_sysret_failed:
SWAPGS
- jmp restore_args
+ jmp restore_c_regs_and_iret
-retint_restore_args: /* return to kernel space */
- DISABLE_INTERRUPTS(CLBR_ANY)
+/* Returning to kernel space */
+retint_kernel:
+#ifdef CONFIG_PREEMPT
+ /* Interrupts are off */
+ /* Check if we need preemption */
+ bt $9,EFLAGS(%rsp) /* interrupts were off? */
+ jnc 1f
+0: cmpl $0,PER_CPU_VAR(__preempt_count)
+ jnz 1f
+ call preempt_schedule_irq
+ jmp 0b
+1:
+#endif
/*
* The iretq could re-enable interrupts:
*/
TRACE_IRQS_IRETQ
-restore_args:
- RESTORE_ARGS 1,8,1
+
+/*
+ * At this label, code paths which return to kernel and to user,
+ * which come from interrupts/exception and from syscalls, merge.
+ */
+restore_c_regs_and_iret:
+ RESTORE_C_REGS
+ REMOVE_PT_GPREGS_FROM_STACK 8
irq_return:
INTERRUPT_RETURN
@@ -890,28 +854,17 @@ retint_signal:
jz retint_swapgs
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
- SAVE_REST
+ SAVE_EXTRA_REGS
movq $-1,ORIG_RAX(%rsp)
xorl %esi,%esi # oldset
movq %rsp,%rdi # &pt_regs
call do_notify_resume
- RESTORE_REST
+ RESTORE_EXTRA_REGS
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
GET_THREAD_INFO(%rcx)
jmp retint_with_reschedule
-#ifdef CONFIG_PREEMPT
- /* Returning to kernel space. Check if we need preemption */
- /* rcx: threadinfo. interrupts off. */
-ENTRY(retint_kernel)
- cmpl $0,PER_CPU_VAR(__preempt_count)
- jnz retint_restore_args
- bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
- jnc retint_restore_args
- call preempt_schedule_irq
- jmp exit_intr
-#endif
CFI_ENDPROC
END(common_interrupt)
@@ -1000,7 +953,7 @@ apicinterrupt IRQ_WORK_VECTOR \
/*
* Exception entry points.
*/
-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
@@ -1022,8 +975,7 @@ ENTRY(\sym)
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
.endif
- subq $ORIG_RAX-R15, %rsp
- CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ ALLOC_PT_GPREGS_ON_STACK
.if \paranoid
.if \paranoid == 1
@@ -1031,10 +983,11 @@ ENTRY(\sym)
testl $3, CS(%rsp) /* If coming from userspace, switch */
jnz 1f /* stacks. */
.endif
- call save_paranoid
+ call paranoid_entry
.else
call error_entry
.endif
+ /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
DEFAULT_FRAME 0
@@ -1056,19 +1009,20 @@ ENTRY(\sym)
.endif
.if \shift_ist != -1
- subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
+ subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
.endif
call \do_sym
.if \shift_ist != -1
- addq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
+ addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
.endif
+ /* these procedures expect "no swapgs" flag in ebx */
.if \paranoid
- jmp paranoid_exit /* %ebx: no swapgs flag */
+ jmp paranoid_exit
.else
- jmp error_exit /* %ebx: no swapgs flag */
+ jmp error_exit
.endif
.if \paranoid == 1
@@ -1272,7 +1226,9 @@ ENTRY(xen_failsafe_callback)
addq $0x30,%rsp
CFI_ADJUST_CFA_OFFSET -0x30
pushq_cfi $-1 /* orig_ax = -1 => not a system call */
- SAVE_ALL
+ ALLOC_PT_GPREGS_ON_STACK
+ SAVE_C_REGS
+ SAVE_EXTRA_REGS
jmp error_exit
CFI_ENDPROC
END(xen_failsafe_callback)
@@ -1304,59 +1260,66 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
#endif
- /*
- * "Paranoid" exit path from exception stack. This is invoked
- * only on return from non-NMI IST interrupts that came
- * from kernel space.
- *
- * We may be returning to very strange contexts (e.g. very early
- * in syscall entry), so checking for preemption here would
- * be complicated. Fortunately, we there's no good reason
- * to try to handle preemption here.
- */
+/*
+ * Save all registers in pt_regs, and switch gs if needed.
+ * Use a slow but surefire "are we in kernel?" check.
+ * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
+ */
+ENTRY(paranoid_entry)
+ XCPT_FRAME 1 15*8
+ cld
+ SAVE_C_REGS 8
+ SAVE_EXTRA_REGS 8
+ movl $1,%ebx
+ movl $MSR_GS_BASE,%ecx
+ rdmsr
+ testl %edx,%edx
+ js 1f /* negative -> in kernel */
+ SWAPGS
+ xorl %ebx,%ebx
+1: ret
+ CFI_ENDPROC
+END(paranoid_entry)
- /* ebx: no swapgs flag */
+/*
+ * "Paranoid" exit path from exception stack. This is invoked
+ * only on return from non-NMI IST interrupts that came
+ * from kernel space.
+ *
+ * We may be returning to very strange contexts (e.g. very early
+ * in syscall entry), so checking for preemption here would
+ * be complicated. Fortunately, there's no good reason
+ * to try to handle preemption here.
+ */
+/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
ENTRY(paranoid_exit)
DEFAULT_FRAME
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG
testl %ebx,%ebx /* swapgs needed? */
- jnz paranoid_restore
- TRACE_IRQS_IRETQ 0
+ jnz paranoid_exit_no_swapgs
+ TRACE_IRQS_IRETQ
SWAPGS_UNSAFE_STACK
- RESTORE_ALL 8
- INTERRUPT_RETURN
-paranoid_restore:
- TRACE_IRQS_IRETQ_DEBUG 0
- RESTORE_ALL 8
+ jmp paranoid_exit_restore
+paranoid_exit_no_swapgs:
+ TRACE_IRQS_IRETQ_DEBUG
+paranoid_exit_restore:
+ RESTORE_EXTRA_REGS
+ RESTORE_C_REGS
+ REMOVE_PT_GPREGS_FROM_STACK 8
INTERRUPT_RETURN
CFI_ENDPROC
END(paranoid_exit)
/*
- * Exception entry point. This expects an error code/orig_rax on the stack.
- * returns in "no swapgs flag" in %ebx.
+ * Save all registers in pt_regs, and switch gs if needed.
+ * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/
ENTRY(error_entry)
- XCPT_FRAME
- CFI_ADJUST_CFA_OFFSET 15*8
- /* oldrax contains error code */
+ XCPT_FRAME 1 15*8
cld
- movq %rdi, RDI+8(%rsp)
- movq %rsi, RSI+8(%rsp)
- movq %rdx, RDX+8(%rsp)
- movq %rcx, RCX+8(%rsp)
- movq %rax, RAX+8(%rsp)
- movq %r8, R8+8(%rsp)
- movq %r9, R9+8(%rsp)
- movq %r10, R10+8(%rsp)
- movq %r11, R11+8(%rsp)
- movq_cfi rbx, RBX+8
- movq %rbp, RBP+8(%rsp)
- movq %r12, R12+8(%rsp)
- movq %r13, R13+8(%rsp)
- movq %r14, R14+8(%rsp)
- movq %r15, R15+8(%rsp)
+ SAVE_C_REGS 8
+ SAVE_EXTRA_REGS 8
xorl %ebx,%ebx
testl $3,CS+8(%rsp)
je error_kernelspace
@@ -1366,12 +1329,12 @@ error_sti:
TRACE_IRQS_OFF
ret
-/*
- * There are two places in the kernel that can potentially fault with
- * usergs. Handle them here. B stepping K8s sometimes report a
- * truncated RIP for IRET exceptions returning to compat mode. Check
- * for these here too.
- */
+ /*
+ * There are two places in the kernel that can potentially fault with
+ * usergs. Handle them here. B stepping K8s sometimes report a
+ * truncated RIP for IRET exceptions returning to compat mode. Check
+ * for these here too.
+ */
error_kernelspace:
CFI_REL_OFFSET rcx, RCX+8
incl %ebx
@@ -1401,11 +1364,11 @@ error_bad_iret:
END(error_entry)
-/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
+/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit)
DEFAULT_FRAME
movl %ebx,%eax
- RESTORE_REST
+ RESTORE_EXTRA_REGS
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
GET_THREAD_INFO(%rcx)
@@ -1420,19 +1383,7 @@ ENTRY(error_exit)
CFI_ENDPROC
END(error_exit)
-/*
- * Test if a given stack is an NMI stack or not.
- */
- .macro test_in_nmi reg stack nmi_ret normal_ret
- cmpq %\reg, \stack
- ja \normal_ret
- subq $EXCEPTION_STKSZ, %\reg
- cmpq %\reg, \stack
- jb \normal_ret
- jmp \nmi_ret
- .endm
-
- /* runs on exception stack */
+/* Runs on exception stack */
ENTRY(nmi)
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
@@ -1468,7 +1419,7 @@ ENTRY(nmi)
* NMI.
*/
- /* Use %rdx as out temp variable throughout */
+ /* Use %rdx as our temp variable throughout */
pushq_cfi %rdx
CFI_REL_OFFSET rdx, 0
@@ -1493,8 +1444,17 @@ ENTRY(nmi)
* We check the variable because the first NMI could be in a
* breakpoint routine using a breakpoint stack.
*/
- lea 6*8(%rsp), %rdx
- test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
+ lea 6*8(%rsp), %rdx
+ /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
+ cmpq %rdx, 4*8(%rsp)
+ /* If the stack pointer is above the NMI stack, this is a normal NMI */
+ ja first_nmi
+ subq $EXCEPTION_STKSZ, %rdx
+ cmpq %rdx, 4*8(%rsp)
+ /* If it is below the NMI stack, it is a normal NMI */
+ jb first_nmi
+ /* Ah, it is within the NMI stack, treat it as nested */
+
CFI_REMEMBER_STATE
nested_nmi:
@@ -1587,7 +1547,7 @@ first_nmi:
.rept 5
pushq_cfi 11*8(%rsp)
.endr
- CFI_DEF_CFA_OFFSET SS+8-RIP
+ CFI_DEF_CFA_OFFSET 5*8
/* Everything up to here is safe from nested NMIs */
@@ -1615,7 +1575,7 @@ repeat_nmi:
pushq_cfi -6*8(%rsp)
.endr
subq $(5*8), %rsp
- CFI_DEF_CFA_OFFSET SS+8-RIP
+ CFI_DEF_CFA_OFFSET 5*8
end_repeat_nmi:
/*
@@ -1624,16 +1584,16 @@ end_repeat_nmi:
* so that we repeat another NMI.
*/
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
- subq $ORIG_RAX-R15, %rsp
- CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ ALLOC_PT_GPREGS_ON_STACK
+
/*
- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
+ * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
* as we should not be calling schedule in NMI context,
* even with normal interrupts enabled. An NMI should not be
* setting NEED_RESCHED or anything that normal interrupts and
* exceptions might do.
*/
- call save_paranoid
+ call paranoid_entry
DEFAULT_FRAME 0
/*
@@ -1664,8 +1624,10 @@ end_repeat_nmi:
nmi_swapgs:
SWAPGS_UNSAFE_STACK
nmi_restore:
+ RESTORE_EXTRA_REGS
+ RESTORE_C_REGS
/* Pop the extra iret frame at once */
- RESTORE_ALL 6*8
+ REMOVE_PT_GPREGS_FROM_STACK 6*8
/* Clear the NMI executing stack variable */
movq $0, 5*8(%rsp)
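The hunk above open-codes the old test_in_nmi macro as straight-line compares with explanatory comments. For reference, a minimal C sketch of the same interval check ('top' plays the role of the lea 6*8(%rsp) result, 'sp' the saved stack pointer at 4*8(%rsp); the EXCEPTION_STKSZ value here is illustrative):

#include <stdbool.h>
#include <stdint.h>

#define EXCEPTION_STKSZ 4096UL		/* illustrative per-exception stack size */

static bool within_nmi_stack(uintptr_t top, uintptr_t sp)
{
	if (sp > top)			/* above the NMI stack: first_nmi */
		return false;
	if (sp < top - EXCEPTION_STKSZ)	/* below it: first_nmi */
		return false;
	return true;			/* inside [top - size, top]: nested_nmi */
}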
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index c4f8d46..2b55ee6 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -177,9 +177,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
*/
load_ucode_bsp();
- if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
- early_printk("Kernel alive\n");
-
clear_page(init_level4_pgt);
/* set init_level4_pgt kernel high mapping*/
init_level4_pgt[511] = early_level4_pgt[511];
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index f36bd42..d031bad 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -22,6 +22,7 @@
#include <asm/cpufeature.h>
#include <asm/percpu.h>
#include <asm/nops.h>
+#include <asm/bootparam.h>
/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)
@@ -90,7 +91,7 @@ ENTRY(startup_32)
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
us to not reload segments */
- testb $(1<<6), BP_loadflags(%esi)
+ testb $KEEP_SEGMENTS, BP_loadflags(%esi)
jnz 2f
/*
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 6fd514d9..ae6588b 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -1,5 +1,5 @@
/*
- * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
+ * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
*
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
* Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
@@ -56,7 +56,7 @@ startup_64:
* %rsi holds a physical pointer to real_mode_data.
*
* We come here either directly from a 64bit bootloader, or from
- * arch/x86_64/boot/compressed/head.S.
+ * arch/x86/boot/compressed/head_64.S.
*
* We only come here initially at boot; nothing else comes here.
*
@@ -146,7 +146,7 @@ startup_64:
leaq level2_kernel_pgt(%rip), %rdi
leaq 4096(%rdi), %r8
/* See if it is a valid page table entry */
-1: testq $1, 0(%rdi)
+1: testb $1, 0(%rdi)
jz 2f
addq %rbp, 0(%rdi)
/* Go to the next page */
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index d5651fc..367f39d 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -42,8 +42,8 @@ void kernel_fpu_enable(void)
* be set (so that the clts/stts pair does nothing that is
* visible in the interrupted kernel thread).
*
- * Except for the eagerfpu case when we return 1 unless we've already
- * been eager and saved the state in kernel_fpu_begin().
+ * Except for the eagerfpu case when we return true; in the likely case
+ * the thread has FPU but we are not going to set/clear TS.
*/
static inline bool interrupted_kernel_fpu_idle(void)
{
@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
return false;
if (use_eager_fpu())
- return __thread_has_fpu(current);
+ return true;
return !__thread_has_fpu(current) &&
(read_cr0() & X86_CR0_TS);
@@ -68,7 +68,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
static inline bool interrupted_user_mode(void)
{
struct pt_regs *regs = get_irq_regs();
- return regs && user_mode_vm(regs);
+ return regs && user_mode(regs);
}
/*
@@ -94,9 +94,10 @@ void __kernel_fpu_begin(void)
if (__thread_has_fpu(me)) {
__save_init_fpu(me);
- } else if (!use_eager_fpu()) {
+ } else {
this_cpu_write(fpu_owner_task, NULL);
- clts();
+ if (!use_eager_fpu())
+ clts();
}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -107,7 +108,7 @@ void __kernel_fpu_end(void)
if (__thread_has_fpu(me)) {
if (WARN_ON(restore_fpu_checking(me)))
- drop_init_fpu(me);
+ fpu_reset_state(me);
} else if (!use_eager_fpu()) {
stts();
}
@@ -120,10 +121,13 @@ void unlazy_fpu(struct task_struct *tsk)
{
preempt_disable();
if (__thread_has_fpu(tsk)) {
- __save_init_fpu(tsk);
- __thread_fpu_end(tsk);
- } else
- tsk->thread.fpu_counter = 0;
+ if (use_eager_fpu()) {
+ __save_fpu(tsk);
+ } else {
+ __save_init_fpu(tsk);
+ __thread_fpu_end(tsk);
+ }
+ }
preempt_enable();
}
EXPORT_SYMBOL(unlazy_fpu);
@@ -221,11 +225,12 @@ void fpu_finit(struct fpu *fpu)
return;
}
+ memset(fpu->state, 0, xstate_size);
+
if (cpu_has_fxsr) {
fx_finit(&fpu->state->fxsave);
} else {
struct i387_fsave_struct *fp = &fpu->state->fsave;
- memset(fp, 0, xstate_size);
fp->cwd = 0xffff037fu;
fp->swd = 0xffff0000u;
fp->twd = 0xffffffffu;
@@ -247,7 +252,7 @@ int init_fpu(struct task_struct *tsk)
if (tsk_used_math(tsk)) {
if (cpu_has_fpu && tsk == current)
unlazy_fpu(tsk);
- tsk->thread.fpu.last_cpu = ~0;
+ task_disable_lazy_fpu_restore(tsk);
return 0;
}
@@ -336,6 +341,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
+ struct xsave_struct *xsave = &target->thread.fpu.state->xsave;
int ret;
if (!cpu_has_xsave)
@@ -350,14 +356,12 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
* memory layout in the thread struct, so that we can copy the entire
* xstateregs to the user using one user_regset_copyout().
*/
- memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
- xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
-
+ memcpy(&xsave->i387.sw_reserved,
+ xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
/*
* Copy the xstate memory layout.
*/
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu.state->xsave, 0, -1);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
return ret;
}
@@ -365,8 +369,8 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
+ struct xsave_struct *xsave = &target->thread.fpu.state->xsave;
int ret;
- struct xsave_hdr_struct *xsave_hdr;
if (!cpu_has_xsave)
return -ENODEV;
@@ -375,22 +379,16 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
if (ret)
return ret;
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu.state->xsave, 0, -1);
-
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
- target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
-
- xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;
-
- xsave_hdr->xstate_bv &= pcntxt_mask;
+ xsave->i387.mxcsr &= mxcsr_feature_mask;
+ xsave->xsave_hdr.xstate_bv &= pcntxt_mask;
/*
* These bits must be zero.
*/
- memset(xsave_hdr->reserved, 0, 48);
-
+ memset(&xsave->xsave_hdr.reserved, 0, 48);
return ret;
}
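Beyond the local xsave pointer cleanup, the xstateregs_set() hunk is about sanitizing a user-supplied buffer before it can ever be loaded. A hedged sketch of just that step with stand-in types (field names follow the diff; the struct layout is illustrative, not the real xsave area):

#include <stdint.h>
#include <string.h>

struct xsave_sketch {
	uint32_t mxcsr;		/* stands in for xsave->i387.mxcsr */
	uint64_t xstate_bv;	/* stands in for xsave->xsave_hdr.xstate_bv */
	uint8_t  reserved[48];	/* stands in for xsave->xsave_hdr.reserved */
};

static void sanitize_xsave(struct xsave_sketch *x,
			   uint32_t mxcsr_feature_mask, uint64_t pcntxt_mask)
{
	/* mxcsr reserved bits must be masked to zero for security reasons */
	x->mxcsr &= mxcsr_feature_mask;
	/* drop state-component bits the CPU does not actually support */
	x->xstate_bv &= pcntxt_mask;
	/* the header's reserved bytes must be zero */
	memset(x->reserved, 0, sizeof(x->reserved));
}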
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 4ddaf66..37dae79 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -54,7 +54,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
* because the ->io_bitmap_max value must match the bitmap
* contents:
*/
- tss = &per_cpu(init_tss, get_cpu());
+ tss = &per_cpu(cpu_tss, get_cpu());
if (turn_on)
bitmap_clear(t->io_bitmap_ptr, from, num);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 67b1cbe..e5952c2 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -295,7 +295,7 @@ int check_irq_vectors_for_cpu_disable(void)
this_cpu = smp_processor_id();
cpumask_copy(&online_new, cpu_online_mask);
- cpu_clear(this_cpu, online_new);
+ cpumask_clear_cpu(this_cpu, &online_new);
this_count = 0;
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
@@ -307,7 +307,7 @@ int check_irq_vectors_for_cpu_disable(void)
data = irq_desc_get_irq_data(desc);
cpumask_copy(&affinity_new, data->affinity);
- cpu_clear(this_cpu, affinity_new);
+ cpumask_clear_cpu(this_cpu, &affinity_new);
/* Do not count inactive or per-cpu irqs. */
if (!irq_has_action(irq) || irqd_is_per_cpu(data))
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 28d28f5..f9fd86a 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -165,7 +165,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
if (unlikely(!desc))
return false;
- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
if (unlikely(overflow))
print_stack_overflow();
desc->handle_irq(irq, desc);
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index e4b503d..394e643 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
u64 estack_top, estack_bottom;
u64 curbase = (u64)task_stack_page(current);
- if (user_mode_vm(regs))
+ if (user_mode(regs))
return;
if (regs->sp >= curbase + sizeof(struct thread_info) +
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 70e181e..cd10a64 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -178,7 +178,8 @@ void __init native_init_IRQ(void)
#endif
for_each_clear_bit_from(i, used_vectors, first_system_vector) {
/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
- set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
+ set_intr_gate(i, irq_entries_start +
+ 8 * (i - FIRST_EXTERNAL_VECTOR));
}
#ifdef CONFIG_X86_LOCAL_APIC
for_each_clear_bit_from(i, used_vectors, NR_VECTORS)
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 7ec1d5f..d6178d9 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{ "bx", 8, offsetof(struct pt_regs, bx) },
{ "cx", 8, offsetof(struct pt_regs, cx) },
{ "dx", 8, offsetof(struct pt_regs, dx) },
- { "si", 8, offsetof(struct pt_regs, dx) },
+ { "si", 8, offsetof(struct pt_regs, si) },
{ "di", 8, offsetof(struct pt_regs, di) },
{ "bp", 8, offsetof(struct pt_regs, bp) },
{ "sp", 8, offsetof(struct pt_regs, sp) },
@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
#ifdef CONFIG_X86_32
switch (regno) {
case GDB_SS:
- if (!user_mode_vm(regs))
+ if (!user_mode(regs))
*(unsigned long *)mem = __KERNEL_DS;
break;
case GDB_SP:
- if (!user_mode_vm(regs))
+ if (!user_mode(regs))
*(unsigned long *)mem = kernel_stack_pointer(regs);
break;
case GDB_GS:
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 4e3d5a96..1deffe6 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -354,6 +354,7 @@ int __copy_instruction(u8 *dest, u8 *src)
{
struct insn insn;
kprobe_opcode_t buf[MAX_INSN_SIZE];
+ int length;
unsigned long recovered_insn =
recover_probed_instruction(buf, (unsigned long)src);
@@ -361,16 +362,18 @@ int __copy_instruction(u8 *dest, u8 *src)
return 0;
kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
insn_get_length(&insn);
+ length = insn.length;
+
/* Another subsystem put a breakpoint and we failed to recover */
if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
return 0;
- memcpy(dest, insn.kaddr, insn.length);
+ memcpy(dest, insn.kaddr, length);
#ifdef CONFIG_X86_64
if (insn_rip_relative(&insn)) {
s64 newdisp;
u8 *disp;
- kernel_insn_init(&insn, dest, insn.length);
+ kernel_insn_init(&insn, dest, length);
insn_get_displacement(&insn);
/*
* The copied instruction uses the %rip-relative addressing
@@ -394,7 +397,7 @@ int __copy_instruction(u8 *dest, u8 *src)
*(s32 *) disp = (s32) newdisp;
}
#endif
- return insn.length;
+ return length;
}
static int arch_copy_kprobe(struct kprobe *p)
@@ -602,7 +605,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
struct kprobe *p;
struct kprobe_ctlblk *kcb;
- if (user_mode_vm(regs))
+ if (user_mode(regs))
return 0;
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
@@ -1007,7 +1010,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
struct die_args *args = data;
int ret = NOTIFY_DONE;
- if (args->regs && user_mode_vm(args->regs))
+ if (args->regs && user_mode(args->regs))
return ret;
if (val == DIE_GPF) {
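The rip-relative fixup in __copy_instruction() keeps the relocated copy pointing at the original target: since src + len + disp_old and dest + len + disp_new must name the same address, disp_new = disp_old + (src - dest). A minimal sketch of that arithmetic (the real code additionally verifies that the result still fits in a signed 32-bit displacement):

#include <stdint.h>

static int32_t fixup_riprel(int32_t disp_old, const uint8_t *src,
			    const uint8_t *dest)
{
	/* src + len + disp_old == dest + len + disp_new */
	return (int32_t)(disp_old + (src - dest));
}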
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index e354cc6..9435620 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -513,7 +513,7 @@ void __init kvm_guest_init(void)
* can get false positives too easily, for example if the host is
* overcommitted.
*/
- watchdog_enable_hardlockup_detector(false);
+ hardlockup_detector_disable();
}
static noinline uint32_t __kvm_cpuid_base(void)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 9bbb9b3..005c03e 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -33,6 +33,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/setup.h>
#if 0
#define DEBUGP(fmt, ...) \
@@ -53,7 +54,7 @@ static DEFINE_MUTEX(module_kaslr_mutex);
static unsigned long int get_module_load_offset(void)
{
- if (kaslr_enabled) {
+ if (kaslr_enabled()) {
mutex_lock(&module_kaslr_mutex);
/*
* Calculate the module_load_offset the first time this
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 548d25f..c614dd4 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -443,7 +443,7 @@ struct pv_mmu_ops pv_mmu_ops = {
.ptep_modify_prot_start = __ptep_modify_prot_start,
.ptep_modify_prot_commit = __ptep_modify_prot_commit,
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
.set_pte_atomic = native_set_pte_atomic,
.pte_clear = native_pte_clear,
@@ -454,13 +454,13 @@ struct pv_mmu_ops pv_mmu_ops = {
.pmd_val = PTE_IDENT,
.make_pmd = PTE_IDENT,
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
.pud_val = PTE_IDENT,
.make_pud = PTE_IDENT,
.set_pgd = native_set_pgd,
#endif
-#endif /* PAGETABLE_LEVELS >= 3 */
+#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
.pte_val = PTE_IDENT,
.pgd_val = PTE_IDENT,
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index 781861c..da8cb98 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -131,10 +131,11 @@ void perf_get_regs_user(struct perf_regs *regs_user,
}
/*
- * RIP, flags, and the argument registers are usually saved.
- * orig_ax is probably okay, too.
+ * These registers are always saved on 64-bit syscall entry.
+ * On 32-bit entry points, they are saved too except r8..r11.
*/
regs_user_copy->ip = user_regs->ip;
+ regs_user_copy->ax = user_regs->ax;
regs_user_copy->cx = user_regs->cx;
regs_user_copy->dx = user_regs->dx;
regs_user_copy->si = user_regs->si;
@@ -145,9 +146,12 @@ void perf_get_regs_user(struct perf_regs *regs_user,
regs_user_copy->r11 = user_regs->r11;
regs_user_copy->orig_ax = user_regs->orig_ax;
regs_user_copy->flags = user_regs->flags;
+ regs_user_copy->sp = user_regs->sp;
+ regs_user_copy->cs = user_regs->cs;
+ regs_user_copy->ss = user_regs->ss;
/*
- * Don't even try to report the "rest" regs.
+ * Most system calls don't save these registers, don't report them.
*/
regs_user_copy->bx = -1;
regs_user_copy->bp = -1;
@@ -158,37 +162,13 @@ void perf_get_regs_user(struct perf_regs *regs_user,
/*
* For this to be at all useful, we need a reasonable guess for
- * sp and the ABI. Be careful: we're in NMI context, and we're
+ * the ABI. Be careful: we're in NMI context, and we're
* considering current to be the current task, so we should
* be careful not to look at any other percpu variables that might
* change during context switches.
*/
- if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
- task_thread_info(current)->status & TS_COMPAT) {
- /* Easy case: we're in a compat syscall. */
- regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
- regs_user_copy->sp = user_regs->sp;
- regs_user_copy->cs = user_regs->cs;
- regs_user_copy->ss = user_regs->ss;
- } else if (user_regs->orig_ax != -1) {
- /*
- * We're probably in a 64-bit syscall.
- * Warning: this code is severely racy. At least it's better
- * than just blindly copying user_regs.
- */
- regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
- regs_user_copy->sp = this_cpu_read(old_rsp);
- regs_user_copy->cs = __USER_CS;
- regs_user_copy->ss = __USER_DS;
- regs_user_copy->cx = -1; /* usually contains garbage */
- } else {
- /* We're probably in an interrupt or exception. */
- regs_user->abi = user_64bit_mode(user_regs) ?
- PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
- regs_user_copy->sp = user_regs->sp;
- regs_user_copy->cs = user_regs->cs;
- regs_user_copy->ss = user_regs->ss;
- }
+ regs_user->abi = user_64bit_mode(user_regs) ?
+ PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
regs_user->regs = regs_user_copy;
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 046e2d6..8213da6 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -9,7 +9,7 @@
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
@@ -24,6 +24,7 @@
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
+#include <asm/mwait.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
@@ -37,7 +38,26 @@
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+ .x86_tss = {
+ .sp0 = TOP_OF_INIT_STACK,
+#ifdef CONFIG_X86_32
+ .ss0 = __KERNEL_DS,
+ .ss1 = __KERNEL_CS,
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
+#endif
+ },
+#ifdef CONFIG_X86_32
+ /*
+ * Note that the .io_bitmap member must be extra-big. This is because
+ * the CPU will access an additional byte beyond the end of the IO
+ * permission bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit.
+ */
+ .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
+#endif
+};
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss);
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
@@ -69,8 +89,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
dst->thread.fpu_counter = 0;
dst->thread.fpu.has_fpu = 0;
- dst->thread.fpu.last_cpu = ~0;
dst->thread.fpu.state = NULL;
+ task_disable_lazy_fpu_restore(dst);
if (tsk_used_math(src)) {
int err = fpu_alloc(&dst->thread.fpu);
if (err)
@@ -109,7 +129,7 @@ void exit_thread(void)
unsigned long *bp = t->io_bitmap_ptr;
if (bp) {
- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+ struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
@@ -131,13 +151,18 @@ void flush_thread(void)
flush_ptrace_hw_breakpoint(tsk);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
- drop_init_fpu(tsk);
- /*
- * Free the FPU state for non xsave platforms. They get reallocated
- * lazily at the first use.
- */
- if (!use_eager_fpu())
+
+ if (!use_eager_fpu()) {
+ /* FPU state will be reallocated lazily at the first use. */
+ drop_fpu(tsk);
free_thread_xstate(tsk);
+ } else if (!used_math()) {
+ /* kthread execs. TODO: cleanup this horror. */
+ if (WARN_ON(init_fpu(tsk)))
+ force_sig(SIGKILL, tsk);
+ user_fpu_begin();
+ restore_init_xstate();
+ }
}
static void hard_disable_TSC(void)
@@ -377,14 +402,11 @@ static void amd_e400_idle(void)
if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
cpumask_set_cpu(cpu, amd_e400_c1e_mask);
- /*
- * Force broadcast so ACPI can not interfere.
- */
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
- &cpu);
+ /* Force broadcast so ACPI can not interfere. */
+ tick_broadcast_force();
pr_info("Switch to broadcast mode on CPU%d\n", cpu);
}
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+ tick_broadcast_enter();
default_idle();
@@ -393,12 +415,59 @@ static void amd_e400_idle(void)
* called with interrupts disabled.
*/
local_irq_disable();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+ tick_broadcast_exit();
local_irq_enable();
} else
default_idle();
}
+/*
+ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
+ * We can't rely on cpuidle installing MWAIT, because it will not load
+ * on systems that support only C1 -- so the boot default must be MWAIT.
+ *
+ * Some AMD machines are the opposite: they depend on using HALT.
+ *
+ * So for default C1, which is used during boot until cpuidle loads,
+ * use MWAIT-C1 on Intel HW that has it, else use HALT.
+ */
+static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
+{
+ if (c->x86_vendor != X86_VENDOR_INTEL)
+ return 0;
+
+ if (!cpu_has(c, X86_FEATURE_MWAIT))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * MONITOR/MWAIT with no hints, used for the default C1 state.
+ * This invokes MWAIT with interrupts enabled and no flags,
+ * which is backwards compatible with the original MWAIT implementation.
+ */
+
+static void mwait_idle(void)
+{
+ if (!current_set_polling_and_test()) {
+ if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
+ smp_mb(); /* quirk */
+ clflush((void *)&current_thread_info()->flags);
+ smp_mb(); /* quirk */
+ }
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ if (!need_resched())
+ __sti_mwait(0, 0);
+ else
+ local_irq_enable();
+ } else {
+ local_irq_enable();
+ }
+ __current_clr_polling();
+}
+
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
@@ -412,6 +481,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
/* E400: APIC timer interrupt does not wake up CPU from C1e */
pr_info("using AMD E400 aware idle routine\n");
x86_idle = amd_e400_idle;
+ } else if (prefer_mwait_c1_over_halt(c)) {
+ pr_info("using mwait in idle threads\n");
+ x86_idle = mwait_idle;
} else
x86_idle = default_idle;
}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 603c4f9..8ed2106 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -73,7 +73,7 @@ void __show_regs(struct pt_regs *regs, int all)
unsigned long sp;
unsigned short ss, gs;
- if (user_mode_vm(regs)) {
+ if (user_mode(regs)) {
sp = regs->sp;
ss = regs->ss & 0xffff;
gs = get_user_gs(regs);
@@ -206,11 +206,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
regs->ip = new_ip;
regs->sp = new_sp;
regs->flags = X86_EFLAGS_IF;
- /*
- * force it to the iret return path by making it look as if there was
- * some work pending.
- */
- set_thread_flag(TIF_NOTIFY_RESUME);
+ force_iret();
}
EXPORT_SYMBOL_GPL(start_thread);
@@ -248,7 +244,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
int cpu = smp_processor_id();
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
fpu_switch_t fpu;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
@@ -256,11 +252,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
fpu = switch_fpu_prepare(prev_p, next_p, cpu);
/*
- * Reload esp0.
- */
- load_sp0(tss, next);
-
- /*
* Save away %gs. No need to save %fs, as it was saved on the
* stack on entry. No need to save %es and %ds, as those are
* always kernel segments while inside the kernel. Doing this
@@ -310,9 +301,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
arch_end_context_switch(next_p);
+ /*
+ * Reload esp0, kernel_stack, and current_top_of_stack. This changes
+ * current_thread_info().
+ */
+ load_sp0(tss, next);
this_cpu_write(kernel_stack,
- (unsigned long)task_stack_page(next_p) +
- THREAD_SIZE - KERNEL_STACK_OFFSET);
+ (unsigned long)task_stack_page(next_p) +
+ THREAD_SIZE);
+ this_cpu_write(cpu_current_top_of_stack,
+ (unsigned long)task_stack_page(next_p) +
+ THREAD_SIZE);
/*
* Restore %gs if needed (which is common)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 67fcc43..4baaa97 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
asmlinkage extern void ret_from_fork(void);
-__visible DEFINE_PER_CPU(unsigned long, old_rsp);
+__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
@@ -161,7 +161,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
childregs = task_pt_regs(p);
p->thread.sp = (unsigned long) childregs;
- p->thread.usersp = me->thread.usersp;
set_tsk_thread_flag(p, TIF_FORK);
p->thread.io_bitmap_ptr = NULL;
@@ -207,7 +206,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
*/
if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
- if (test_thread_flag(TIF_IA32))
+ if (is_ia32_task())
err = do_set_thread_area(p, -1,
(struct user_desc __user *)childregs->si, 0);
else
@@ -235,13 +234,12 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
loadsegment(es, _ds);
loadsegment(ds, _ds);
load_gs_index(0);
- current->thread.usersp = new_sp;
regs->ip = new_ip;
regs->sp = new_sp;
- this_cpu_write(old_rsp, new_sp);
regs->cs = _cs;
regs->ss = _ss;
regs->flags = X86_EFLAGS_IF;
+ force_iret();
}
void
@@ -277,15 +275,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct thread_struct *prev = &prev_p->thread;
struct thread_struct *next = &next_p->thread;
int cpu = smp_processor_id();
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
unsigned fsindex, gsindex;
fpu_switch_t fpu;
fpu = switch_fpu_prepare(prev_p, next_p, cpu);
- /* Reload esp0 and ss1. */
- load_sp0(tss, next);
-
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
*
@@ -401,8 +396,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/*
* Switch the PDA and FPU contexts.
*/
- prev->usersp = this_cpu_read(old_rsp);
- this_cpu_write(old_rsp, next->usersp);
this_cpu_write(current_task, next_p);
/*
@@ -413,9 +406,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
+ /* Reload esp0 and ss1. This changes current_thread_info(). */
+ load_sp0(tss, next);
+
this_cpu_write(kernel_stack,
- (unsigned long)task_stack_page(next_p) +
- THREAD_SIZE - KERNEL_STACK_OFFSET);
+ (unsigned long)task_stack_page(next_p) + THREAD_SIZE);
/*
* Now maybe reload the debug registers and handle I/O bitmaps
@@ -602,6 +597,5 @@ long sys_arch_prctl(int code, unsigned long addr)
unsigned long KSTK_ESP(struct task_struct *task)
{
- return (test_tsk_thread_flag(task, TIF_IA32)) ?
- (task_pt_regs(task)->sp) : ((task)->thread.usersp);
+ return task_pt_regs(task)->sp;
}
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index e510618..a7bc794 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -364,18 +364,12 @@ static int set_segment_reg(struct task_struct *task,
case offsetof(struct user_regs_struct,cs):
if (unlikely(value == 0))
return -EIO;
-#ifdef CONFIG_IA32_EMULATION
- if (test_tsk_thread_flag(task, TIF_IA32))
- task_pt_regs(task)->cs = value;
-#endif
+ task_pt_regs(task)->cs = value;
break;
case offsetof(struct user_regs_struct,ss):
if (unlikely(value == 0))
return -EIO;
-#ifdef CONFIG_IA32_EMULATION
- if (test_tsk_thread_flag(task, TIF_IA32))
- task_pt_regs(task)->ss = value;
-#endif
+ task_pt_regs(task)->ss = value;
break;
}
@@ -1421,7 +1415,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
memset(info, 0, sizeof(*info));
info->si_signo = SIGTRAP;
info->si_code = si_code;
- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
+ info->si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
}
void user_single_step_siginfo(struct task_struct *tsk,
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 2f355d2..e5ecd20 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}
+static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
+
+static struct pvclock_vsyscall_time_info *
+pvclock_get_vsyscall_user_time_info(int cpu)
+{
+ if (!pvclock_vdso_info) {
+ BUG();
+ return NULL;
+ }
+
+ return &pvclock_vdso_info[cpu];
+}
+
+struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
+{
+ return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
+}
+
#ifdef CONFIG_X86_64
+static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
+ void *v)
+{
+ struct task_migration_notifier *mn = v;
+ struct pvclock_vsyscall_time_info *pvti;
+
+ pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
+
+ /* this is NULL when pvclock vsyscall is not initialized */
+ if (unlikely(pvti == NULL))
+ return NOTIFY_DONE;
+
+ pvti->migrate_count++;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pvclock_migrate = {
+ .notifier_call = pvclock_task_migrate,
+};
+
/*
* Initialize the generic pvclock vsyscall state. This will allocate
* a/some page(s) for the per-vcpu pvclock information, set up a
@@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
+ pvclock_vdso_info = i;
+
for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
__pa(i) + (idx*PAGE_SIZE),
PAGE_KERNEL_VVAR);
}
+
+ register_task_migration_notifier(&pvclock_migrate);
+
return 0;
}
#endif
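The migrate_count bump exists so a lockless reader can notice that its task moved to another vCPU mid-read and retry. A hedged sketch of the consumer side, assuming the vdso rereads the counter around its clock computation (read_tsc_and_scale() is a hypothetical stand-in for the actual pvti-based time read):

#include <stdint.h>

static uint64_t read_tsc_and_scale(void)	/* hypothetical stub */
{
	return 0;
}

static uint64_t pvclock_read_retry(const volatile uint32_t *migrate_count)
{
	uint32_t m0, m1;
	uint64_t t;

	do {
		m0 = *migrate_count;
		t  = read_tsc_and_scale();
		m1 = *migrate_count;
	} while (m0 != m1);		/* migrated mid-read: retry */

	return t;
}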
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index bae6c60..86db4bc 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
},
+ /* ASRock */
+ { /* Handle problems with rebooting on ASRock Q1900DC-ITX */
+ .callback = set_pci_reboot,
+ .ident = "ASRock Q1900DC-ITX",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
+ DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
+ },
+ },
+
/* ASUS */
{ /* Handle problems with rebooting on ASUS P4S800 */
.callback = set_bios_reboot,
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index e13f8e7..77630d5 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -226,23 +226,23 @@ swap_pages:
movl (%ebx), %ecx
addl $4, %ebx
1:
- testl $0x1, %ecx /* is it a destination page */
+ testb $0x1, %cl /* is it a destination page */
jz 2f
movl %ecx, %edi
andl $0xfffff000, %edi
jmp 0b
2:
- testl $0x2, %ecx /* is it an indirection page */
+ testb $0x2, %cl /* is it an indirection page */
jz 2f
movl %ecx, %ebx
andl $0xfffff000, %ebx
jmp 0b
2:
- testl $0x4, %ecx /* is it the done indicator */
+ testb $0x4, %cl /* is it the done indicator */
jz 2f
jmp 3f
2:
- testl $0x8, %ecx /* is it the source indicator */
+ testb $0x8, %cl /* is it the source indicator */
jz 0b /* Ignore it otherwise */
movl %ecx, %esi /* For every source page do a copy */
andl $0xfffff000, %esi
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 3fd2c69..98111b3 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -123,7 +123,7 @@ identity_mapped:
* Set cr4 to a known state:
* - physical address extension enabled
*/
- movq $X86_CR4_PAE, %rax
+ movl $X86_CR4_PAE, %eax
movq %rax, %cr4
jmp 1f
@@ -221,23 +221,23 @@ swap_pages:
movq (%rbx), %rcx
addq $8, %rbx
1:
- testq $0x1, %rcx /* is it a destination page? */
+ testb $0x1, %cl /* is it a destination page? */
jz 2f
movq %rcx, %rdi
andq $0xfffffffffffff000, %rdi
jmp 0b
2:
- testq $0x2, %rcx /* is it an indirection page? */
+ testb $0x2, %cl /* is it an indirection page? */
jz 2f
movq %rcx, %rbx
andq $0xfffffffffffff000, %rbx
jmp 0b
2:
- testq $0x4, %rcx /* is it the done indicator? */
+ testb $0x4, %cl /* is it the done indicator? */
jz 2f
jmp 3f
2:
- testq $0x8, %rcx /* is it the source indicator? */
+ testb $0x8, %cl /* is it the source indicator? */
jz 0b /* Ignore it otherwise */
movq %rcx, %rsi /* For every source page do a copy */
andq $0xfffffffffffff000, %rsi
@@ -246,17 +246,17 @@ swap_pages:
movq %rsi, %rax
movq %r10, %rdi
- movq $512, %rcx
+ movl $512, %ecx
rep ; movsq
movq %rax, %rdi
movq %rdx, %rsi
- movq $512, %rcx
+ movl $512, %ecx
rep ; movsq
movq %rdx, %rdi
movq %r10, %rsi
- movq $512, %rcx
+ movl $512, %ecx
rep ; movsq
lea PAGE_SIZE(%rax), %rsi
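The swap_pages loop in both relocate_kernel variants walks the kexec indirection list; each entry carries a page address with a type flag in its low bits, which is why a testb against %cl is sufficient. A sketch of the decode step (the flag values mirror the testb immediates above and correspond to the kexec IND_* flags):

#define IND_DESTINATION	0x1UL	/* entry sets the copy destination */
#define IND_INDIRECTION	0x2UL	/* entry points at the next list page */
#define IND_DONE	0x4UL	/* end of the list */
#define IND_SOURCE	0x8UL	/* entry names a source page to copy */
#define PAGE_ADDR(e)	((e) & ~0xfffUL)

enum action { SET_DEST, FOLLOW, STOP, COPY, SKIP };

static enum action decode_entry(unsigned long entry)
{
	if (entry & IND_DESTINATION)
		return SET_DEST;
	if (entry & IND_INDIRECTION)
		return FOLLOW;
	if (entry & IND_DONE)
		return STOP;
	if (entry & IND_SOURCE)
		return COPY;
	return SKIP;			/* "Ignore it otherwise" */
}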
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 98dc931..d74ac33 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -122,8 +122,6 @@
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;
-bool __read_mostly kaslr_enabled = false;
-
#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif
@@ -356,7 +354,7 @@ static void __init relocate_initrd(void)
mapaddr = ramdisk_image & PAGE_MASK;
p = early_memremap(mapaddr, clen+slop);
memcpy(q, p+slop, clen);
- early_iounmap(p, clen+slop);
+ early_memunmap(p, clen+slop);
q += clen;
ramdisk_image += clen;
ramdisk_size -= clen;
@@ -427,11 +425,6 @@ static void __init reserve_initrd(void)
}
#endif /* CONFIG_BLK_DEV_INITRD */
-static void __init parse_kaslr_setup(u64 pa_data, u32 data_len)
-{
- kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data));
-}
-
static void __init parse_setup_data(void)
{
struct setup_data *data;
@@ -445,7 +438,7 @@ static void __init parse_setup_data(void)
data_len = data->len + sizeof(struct setup_data);
data_type = data->type;
pa_next = data->next;
- early_iounmap(data, sizeof(*data));
+ early_memunmap(data, sizeof(*data));
switch (data_type) {
case SETUP_E820_EXT:
@@ -457,9 +450,6 @@ static void __init parse_setup_data(void)
case SETUP_EFI:
parse_efi_setup(pa_data, data_len);
break;
- case SETUP_KASLR:
- parse_kaslr_setup(pa_data, data_len);
- break;
default:
break;
}
@@ -480,7 +470,7 @@ static void __init e820_reserve_setup_data(void)
E820_RAM, E820_RESERVED_KERN);
found = 1;
pa_data = data->next;
- early_iounmap(data, sizeof(*data));
+ early_memunmap(data, sizeof(*data));
}
if (!found)
return;
@@ -501,7 +491,7 @@ static void __init memblock_x86_reserve_range_setup_data(void)
data = early_memremap(pa_data, sizeof(*data));
memblock_reserve(pa_data, sizeof(*data) + data->len);
pa_data = data->next;
- early_iounmap(data, sizeof(*data));
+ early_memunmap(data, sizeof(*data));
}
}
@@ -842,14 +832,15 @@ static void __init trim_low_memory_range(void)
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
- if (kaslr_enabled)
+ if (kaslr_enabled()) {
pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
(unsigned long)&_text - __START_KERNEL,
__START_KERNEL,
__START_KERNEL_map,
MODULES_VADDR-1);
- else
+ } else {
pr_emerg("Kernel Offset: disabled\n");
+ }
return 0;
}
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index e504246..3e58186 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -61,8 +61,7 @@
regs->seg = GET_SEG(seg) | 3; \
} while (0)
-int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
- unsigned long *pax)
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
void __user *buf;
unsigned int tmpflags;
@@ -81,7 +80,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
#endif /* CONFIG_X86_32 */
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
- COPY(dx); COPY(cx); COPY(ip);
+ COPY(dx); COPY(cx); COPY(ip); COPY(ax);
#ifdef CONFIG_X86_64
COPY(r8);
@@ -94,27 +93,20 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
COPY(r15);
#endif /* CONFIG_X86_64 */
-#ifdef CONFIG_X86_32
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
-#else /* !CONFIG_X86_32 */
- /* Kernel saves and restores only the CS segment register on signals,
- * which is the bare minimum needed to allow mixed 32/64-bit code.
- * App's signal handler can save/restore other segments if needed. */
- COPY_SEG_CPL3(cs);
-#endif /* CONFIG_X86_32 */
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
regs->orig_ax = -1; /* disable syscall checks */
get_user_ex(buf, &sc->fpstate);
-
- get_user_ex(*pax, &sc->ax);
} get_user_catch(err);
err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32));
+ force_iret();
+
return err;
}
@@ -162,8 +154,9 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
#else /* !CONFIG_X86_32 */
put_user_ex(regs->flags, &sc->flags);
put_user_ex(regs->cs, &sc->cs);
- put_user_ex(0, &sc->gs);
- put_user_ex(0, &sc->fs);
+ put_user_ex(0, &sc->__pad2);
+ put_user_ex(0, &sc->__pad1);
+ put_user_ex(regs->ss, &sc->ss);
#endif /* CONFIG_X86_32 */
put_user_ex(fpstate, &sc->fpstate);
@@ -457,9 +450,19 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
regs->sp = (unsigned long)frame;
- /* Set up the CS register to run signal handlers in 64-bit mode,
- even if the handler happens to be interrupting 32-bit code. */
+ /*
+ * Set up the CS and SS registers to run signal handlers in
+ * 64-bit mode, even if the handler happens to be interrupting
+ * 32-bit or 16-bit code.
+ *
+ * SS is subtle. In 64-bit mode, we don't need any particular
+ * SS descriptor, but we do need SS to be valid. It's possible
+ * that the old SS is entirely bogus -- this can happen if the
+ * signal we're trying to deliver is #GP or #SS caused by a bad
+ * SS value.
+ */
regs->cs = __USER_CS;
+ regs->ss = __USER_DS;
return 0;
}
@@ -539,7 +542,6 @@ asmlinkage unsigned long sys_sigreturn(void)
{
struct pt_regs *regs = current_pt_regs();
struct sigframe __user *frame;
- unsigned long ax;
sigset_t set;
frame = (struct sigframe __user *)(regs->sp - 8);
@@ -553,9 +555,9 @@ asmlinkage unsigned long sys_sigreturn(void)
set_current_blocked(&set);
- if (restore_sigcontext(regs, &frame->sc, &ax))
+ if (restore_sigcontext(regs, &frame->sc))
goto badframe;
- return ax;
+ return regs->ax;
badframe:
signal_fault(regs, frame, "sigreturn");
@@ -568,7 +570,6 @@ asmlinkage long sys_rt_sigreturn(void)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
- unsigned long ax;
sigset_t set;
frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
@@ -579,13 +580,13 @@ asmlinkage long sys_rt_sigreturn(void)
set_current_blocked(&set);
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
- return ax;
+ return regs->ax;
badframe:
signal_fault(regs, frame, "rt_sigreturn");
@@ -679,7 +680,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
* Ensure the signal handler starts with the new fpu state.
*/
if (used_math())
- drop_init_fpu(current);
+ fpu_reset_state(current);
}
signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
}
@@ -780,7 +781,6 @@ asmlinkage long sys32_x32_rt_sigreturn(void)
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe_x32 __user *frame;
sigset_t set;
- unsigned long ax;
frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
@@ -791,13 +791,13 @@ asmlinkage long sys32_x32_rt_sigreturn(void)
set_current_blocked(&set);
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
- return ax;
+ return regs->ax;
badframe:
signal_fault(regs, frame, "x32 rt_sigreturn");
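With COPY(ax) added to restore_sigcontext(), the three sigreturn paths no longer thread an out-parameter around; the restored register itself becomes the syscall return value. A toy sketch of the simplified flow (stand-in types; the -EFAULT value is illustrative):

struct pt_regs_sketch { long ax; /* ... */ };
struct sigcontext_sketch { long ax; /* ... */ };

static int restore_sigcontext_sketch(struct pt_regs_sketch *regs,
				     const struct sigcontext_sketch *sc)
{
	regs->ax = sc->ax;	/* formerly returned via an *pax out-param */
	return 0;
}

static long sigreturn_sketch(struct pt_regs_sketch *regs,
			     const struct sigcontext_sketch *sc)
{
	if (restore_sigcontext_sketch(regs, sc))
		return -14;	/* badframe path, illustrative -EFAULT */
	return regs->ax;	/* the restored ax is the return value */
}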
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index febc6aa..50e547e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -77,9 +77,6 @@
#include <asm/realmode.h>
#include <asm/misc.h>
-/* State of each CPU */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
@@ -257,7 +254,7 @@ static void notrace start_secondary(void *unused)
lock_vector_lock();
set_cpu_online(smp_processor_id(), true);
unlock_vector_lock();
- per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+ cpu_set_state_online(smp_processor_id());
x86_platform.nmi_init();
/* enable local interrupts */
@@ -779,6 +776,26 @@ out:
return boot_error;
}
+void common_cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ /* Just in case we booted with a single CPU. */
+ alternatives_enable_smp();
+
+ per_cpu(current_task, cpu) = idle;
+
+#ifdef CONFIG_X86_32
+ /* Stack for startup_32 can be just as for start_secondary onwards */
+ irq_ctx_init(cpu);
+ per_cpu(cpu_current_top_of_stack, cpu) =
+ (unsigned long)task_stack_page(idle) + THREAD_SIZE;
+#else
+ clear_tsk_thread_flag(idle, TIF_FORK);
+ initial_gs = per_cpu_offset(cpu);
+#endif
+ per_cpu(kernel_stack, cpu) =
+ (unsigned long)task_stack_page(idle) + THREAD_SIZE;
+}
+
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -796,23 +813,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
int cpu0_nmi_registered = 0;
unsigned long timeout;
- /* Just in case we booted with a single CPU. */
- alternatives_enable_smp();
-
idle->thread.sp = (unsigned long) (((struct pt_regs *)
(THREAD_SIZE + task_stack_page(idle))) - 1);
- per_cpu(current_task, cpu) = idle;
-#ifdef CONFIG_X86_32
- /* Stack for startup_32 can be just as for start_secondary onwards */
- irq_ctx_init(cpu);
-#else
- clear_tsk_thread_flag(idle, TIF_FORK);
- initial_gs = per_cpu_offset(cpu);
-#endif
- per_cpu(kernel_stack, cpu) =
- (unsigned long)task_stack_page(idle) -
- KERNEL_STACK_OFFSET + THREAD_SIZE;
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
initial_code = (unsigned long)start_secondary;
stack_start = idle->thread.sp;
@@ -948,11 +951,16 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
*/
mtrr_save_state();
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+ /* x86 CPUs take themselves offline, so delayed offline is OK. */
+ err = cpu_check_up_prepare(cpu);
+ if (err && err != -EBUSY)
+ return err;
/* the FPU context is blank, nobody can own it */
__cpu_disable_lazy_restore(cpu);
+ common_cpu_up(cpu, tidle);
+
err = do_boot_cpu(apicid, cpu, tidle);
if (err) {
pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
@@ -1086,8 +1094,6 @@ static int __init smp_sanity_check(unsigned max_cpus)
return SMP_NO_APIC;
}
- verify_local_APIC();
-
/*
* If SMP should be disabled, then really disable it!
*/
@@ -1191,7 +1197,7 @@ void __init native_smp_prepare_boot_cpu(void)
switch_to_new_gdt(me);
/* already set me in cpu_online_mask in boot_cpu_init() */
cpumask_set_cpu(me, cpu_callout_mask);
- per_cpu(cpu_state, me) = CPU_ONLINE;
+ cpu_set_state_online(me);
}
void __init native_smp_cpus_done(unsigned int max_cpus)
@@ -1318,14 +1324,10 @@ static void __ref remove_cpu_from_maps(int cpu)
numa_remove_cpu(cpu);
}
-static DEFINE_PER_CPU(struct completion, die_complete);
-
void cpu_disable_common(void)
{
int cpu = smp_processor_id();
- init_completion(&per_cpu(die_complete, smp_processor_id()));
-
remove_siblinginfo(cpu);
/* It's now safe to remove this processor from the online map */
@@ -1349,24 +1351,27 @@ int native_cpu_disable(void)
return 0;
}
-void cpu_die_common(unsigned int cpu)
+int common_cpu_die(unsigned int cpu)
{
- wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
-}
+ int ret = 0;
-void native_cpu_die(unsigned int cpu)
-{
/* We don't do anything here: idle task is faking death itself. */
- cpu_die_common(cpu);
-
/* They ack this in play_dead() by setting CPU_DEAD */
- if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+ if (cpu_wait_death(cpu, 5)) {
if (system_state == SYSTEM_RUNNING)
pr_info("CPU %u is now offline\n", cpu);
} else {
pr_err("CPU %u didn't die...\n", cpu);
+ ret = -1;
}
+
+ return ret;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+ common_cpu_die(cpu);
}
void play_dead_common(void)
@@ -1375,10 +1380,8 @@ void play_dead_common(void)
reset_lazy_tlbstate();
amd_e400_remove_cpu(raw_smp_processor_id());
- mb();
/* Ack it */
- __this_cpu_write(cpu_state, CPU_DEAD);
- complete(&per_cpu(die_complete, smp_processor_id()));
+ (void)cpu_report_death();
/*
* With physical CPU hotplug, we should halt the cpu
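The per-cpu cpu_state/die_complete pair is replaced by the generic cpu_wait_death()/cpu_report_death() handshake. A toy sketch of the protocol those helpers implement, using C11 atomics (the real implementation in the core hotplug code also handles timeouts and late acknowledgements more carefully):

#include <stdatomic.h>
#include <stdbool.h>

enum { CPU_ALIVE, CPU_DEAD };
static _Atomic int cpu_state = CPU_ALIVE;

static void report_death(void)		/* dying CPU, cf. play_dead_common() */
{
	atomic_store(&cpu_state, CPU_DEAD);
}

static bool wait_death(int spins)	/* survivor, cf. common_cpu_die() */
{
	while (spins-- > 0)
		if (atomic_load(&cpu_state) == CPU_DEAD)
			return true;
	return false;			/* "CPU %u didn't die..." path */
}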
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 30277e2..10e0272 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -34,10 +34,26 @@ static unsigned long get_align_mask(void)
return va_align.mask;
}
+/*
+ * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
+ * va_align.bits, [12:upper_bit), are set to a random value instead of
+ * zeroing them. This random value is computed once per boot. This form
+ * of ASLR is known as "per-boot ASLR".
+ *
+ * To achieve this, the random value is added to the info.align_offset
+ * value before calling vm_unmapped_area() or ORed directly to the
+ * address.
+ */
+static unsigned long get_align_bits(void)
+{
+ return va_align.bits & get_align_mask();
+}
+
unsigned long align_vdso_addr(unsigned long addr)
{
unsigned long align_mask = get_align_mask();
- return (addr + align_mask) & ~align_mask;
+ addr = (addr + align_mask) & ~align_mask;
+ return addr | get_align_bits();
}
static int __init control_va_addr_alignment(char *str)
@@ -135,8 +151,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = begin;
info.high_limit = end;
- info.align_mask = filp ? get_align_mask() : 0;
+ info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
+ if (filp) {
+ info.align_mask = get_align_mask();
+ info.align_offset += get_align_bits();
+ }
return vm_unmapped_area(&info);
}
@@ -174,8 +194,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
- info.align_mask = filp ? get_align_mask() : 0;
+ info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
+ if (filp) {
+ info.align_mask = get_align_mask();
+ info.align_offset += get_align_bits();
+ }
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
return addr;
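A worked example of the align-then-randomize scheme described in the get_align_bits() comment, using an illustrative 64 KiB mask (the real va_align values depend on the CPU):

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0xffffUL;		/* illustrative align mask */
	unsigned long bits = 0x5000UL;		/* per-boot random value, pre-masked */
	unsigned long addr = 0x7f1234567890UL;

	addr = (addr + mask) & ~mask;		/* round up to the alignment */
	addr |= bits & mask;			/* then set the random bits */
	printf("%#lx\n", addr);			/* prints 0x7f1234575000 */
	return 0;
}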
diff --git a/arch/x86/kernel/syscall_32.c b/arch/x86/kernel/syscall_32.c
index e9bcd57..3777189 100644
--- a/arch/x86/kernel/syscall_32.c
+++ b/arch/x86/kernel/syscall_32.c
@@ -5,21 +5,29 @@
#include <linux/cache.h>
#include <asm/asm-offsets.h>
-#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void sym(void) ;
+#ifdef CONFIG_IA32_EMULATION
+#define SYM(sym, compat) compat
+#else
+#define SYM(sym, compat) sym
+#define ia32_sys_call_table sys_call_table
+#define __NR_ia32_syscall_max __NR_syscall_max
+#endif
+
+#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void) ;
#include <asm/syscalls_32.h>
#undef __SYSCALL_I386
-#define __SYSCALL_I386(nr, sym, compat) [nr] = sym,
+#define __SYSCALL_I386(nr, sym, compat) [nr] = SYM(sym, compat),
typedef asmlinkage void (*sys_call_ptr_t)(void);
extern asmlinkage void sys_ni_syscall(void);
-__visible const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
+__visible const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
- [0 ... __NR_syscall_max] = &sys_ni_syscall,
+ [0 ... __NR_ia32_syscall_max] = &sys_ni_syscall,
#include <asm/syscalls_32.h>
};
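The table is built with an X-macro: asm/syscalls_32.h is included twice with __SYSCALL_I386 redefined, first to declare every handler, then to emit the designated initializers. A toy, self-contained sketch of the pattern (hypothetical three-entry list):

#include <stdio.h>

#define SYSCALL_LIST \
	X(0, sys_restart) \
	X(1, sys_exit) \
	X(2, sys_fork)

#define X(nr, sym) static void sym(void) { puts(#sym); }
SYSCALL_LIST			/* first pass: define the handlers */
#undef X

typedef void (*sys_call_ptr_t)(void);

#define X(nr, sym) [nr] = sym,
static const sys_call_ptr_t table[] = { SYSCALL_LIST };	/* second pass */
#undef X

int main(void)
{
	table[1]();		/* prints "sys_exit" */
	return 0;
}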
diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
index b79133a..5ecbfe5 100644
--- a/arch/x86/kernel/test_rodata.c
+++ b/arch/x86/kernel/test_rodata.c
@@ -57,7 +57,7 @@ int rodata_test(void)
/* test 3: check the value hasn't changed */
/* If this test fails, we managed to overwrite the data */
if (!rodata_test_data) {
- printk(KERN_ERR "rodata_test: Test 3 failes (end data)\n");
+ printk(KERN_ERR "rodata_test: Test 3 fails (end data)\n");
return -ENODEV;
}
/* test 4: check if the rodata section is 4Kb aligned */
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 25adc0e..d39c091 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -30,7 +30,7 @@ unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
+ if (!user_mode(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
return *(unsigned long *)(regs->bp + sizeof(long));
#else
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 9d2073e..324ab52 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -112,7 +112,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
{
enum ctx_state prev_state;
- if (user_mode_vm(regs)) {
+ if (user_mode(regs)) {
/* Other than that, we're just an exception. */
prev_state = exception_enter();
} else {
@@ -123,7 +123,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
* but we need to notify RCU.
*/
rcu_nmi_enter();
- prev_state = IN_KERNEL; /* the value is irrelevant. */
+ prev_state = CONTEXT_KERNEL; /* the value is irrelevant. */
}
/*
@@ -146,7 +146,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
/* Must be before exception_exit. */
preempt_count_sub(HARDIRQ_OFFSET);
- if (user_mode_vm(regs))
+ if (user_mode(regs))
return exception_exit(prev_state);
else
rcu_nmi_exit();
@@ -158,7 +158,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
*
* IST exception handlers normally cannot schedule. As a special
* exception, if the exception interrupted userspace code (i.e.
- * user_mode_vm(regs) would return true) and the exception was not
+ * user_mode(regs) would return true) and the exception was not
* a double fault, it can be safe to schedule. ist_begin_non_atomic()
* begins a non-atomic section within an ist_enter()/ist_exit() region.
* Callers are responsible for enabling interrupts themselves inside
@@ -167,15 +167,15 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
*/
void ist_begin_non_atomic(struct pt_regs *regs)
{
- BUG_ON(!user_mode_vm(regs));
+ BUG_ON(!user_mode(regs));
/*
* Sanity check: we need to be on the normal thread stack. This
* will catch asm bugs and any attempt to use ist_preempt_enable
* from double_fault.
*/
- BUG_ON(((current_stack_pointer() ^ this_cpu_read_stable(kernel_stack))
- & ~(THREAD_SIZE - 1)) != 0);
+ BUG_ON((unsigned long)(current_top_of_stack() -
+ current_stack_pointer()) >= THREAD_SIZE);
preempt_count_sub(HARDIRQ_OFFSET);
}
@@ -194,8 +194,7 @@ static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
struct pt_regs *regs, long error_code)
{
-#ifdef CONFIG_X86_32
- if (regs->flags & X86_VM_MASK) {
+ if (v8086_mode(regs)) {
/*
* Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
* On nmi (interrupt 2), do_trap should not be called.
@@ -207,7 +206,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
}
return -1;
}
-#endif
+
if (!user_mode(regs)) {
if (!fixup_exception(regs)) {
tsk->thread.error_code = error_code;
@@ -462,13 +461,11 @@ do_general_protection(struct pt_regs *regs, long error_code)
prev_state = exception_enter();
conditional_sti(regs);
-#ifdef CONFIG_X86_32
- if (regs->flags & X86_VM_MASK) {
+ if (v8086_mode(regs)) {
local_irq_enable();
handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
goto exit;
}
-#endif
tsk = current;
if (!user_mode(regs)) {
@@ -587,7 +584,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
/* Copy the remainder of the stack from the current stack. */
memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
- BUG_ON(!user_mode_vm(&new_stack->regs));
+ BUG_ON(!user_mode(&new_stack->regs));
return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
@@ -673,7 +670,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
/* It's safe to allow irq's after DR6 has been saved */
preempt_conditional_sti(regs);
- if (regs->flags & X86_VM_MASK) {
+ if (v8086_mode(regs)) {
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
X86_TRAP_DB);
preempt_conditional_cli(regs);
@@ -721,7 +718,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
return;
conditional_sti(regs);
- if (!user_mode_vm(regs))
+ if (!user_mode(regs))
{
if (!fixup_exception(regs)) {
task->thread.error_code = error_code;
@@ -734,7 +731,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
/*
* Save the info for the exception handler and clear the error.
*/
- save_init_fpu(task);
+ unlazy_fpu(task);
task->thread.trap_nr = trapnr;
task->thread.error_code = error_code;
info.si_signo = SIGFPE;
@@ -863,7 +860,7 @@ void math_state_restore(void)
kernel_fpu_disable();
__thread_fpu_begin(tsk);
if (unlikely(restore_fpu_checking(tsk))) {
- drop_init_fpu(tsk);
+ fpu_reset_state(tsk);
force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
} else {
tsk->thread.fpu_counter++;
@@ -925,9 +922,21 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
- set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
+ /*
+ * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
+ * is ready in cpu_init() <-- trap_init(). Before trap_init(),
+ * CPU runs at ring 0 so it is impossible to hit an invalid
+ * stack. Using the original stack works well enough at this
+ * early stage. DEBUG_STACK will be equipped after cpu_init() in
+ * trap_init().
+ *
+ * We don't need to set trace_idt_table like set_intr_gate(),
+ * since we don't have trace_debug and it will be reset to
+ * 'debug' in trap_init() by set_intr_gate_ist().
+ */
+ set_intr_gate_notrace(X86_TRAP_DB, debug);
/* int3 can be called from all */
- set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
+ set_system_intr_gate(X86_TRAP_BP, &int3);
#ifdef CONFIG_X86_32
set_intr_gate(X86_TRAP_PF, page_fault);
#endif
@@ -1005,6 +1014,15 @@ void __init trap_init(void)
*/
cpu_init();
+ /*
+ * X86_TRAP_DB and X86_TRAP_BP have been set
+ * in early_trap_init(). However, IST works only after
+ * cpu_init() loads TSS. See comments in early_trap_init().
+ */
+ set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
+ /* int3 can be called from all */
+ set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
+
x86_init.irqs.trap_init();
#ifdef CONFIG_X86_64
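The rewritten BUG_ON in ist_begin_non_atomic() relies on unsigned arithmetic: (top - sp) >= THREAD_SIZE rejects a stack pointer below the thread stack and, via wraparound, one above its top as well. A sketch of the predicate (THREAD_SIZE value illustrative):

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE 16384UL	/* illustrative */

static bool on_thread_stack(uintptr_t top, uintptr_t sp)
{
	/* sp > top makes top - sp wrap to a huge value and fail too */
	return (uintptr_t)(top - sp) < THREAD_SIZE;
}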
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 81f8adb0..0b81ad6 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -912,7 +912,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
int ret = NOTIFY_DONE;
/* We are only interested in userspace traps */
- if (regs && !user_mode_vm(regs))
+ if (regs && !user_mode(regs))
return NOTIFY_DONE;
switch (val) {
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index e8edcf5..fc9db6e 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -150,7 +150,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
do_exit(SIGSEGV);
}
- tss = &per_cpu(init_tss, get_cpu());
+ tss = &per_cpu(cpu_tss, get_cpu());
current->thread.sp0 = current->thread.saved_sp0;
current->thread.sysenter_cs = __KERNEL_CS;
load_sp0(tss, &current->thread);
@@ -318,7 +318,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
tsk->thread.saved_fs = info->regs32->fs;
tsk->thread.saved_gs = get_user_gs(info->regs32);
- tss = &per_cpu(init_tss, get_cpu());
+ tss = &per_cpu(cpu_tss, get_cpu());
tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
if (cpu_has_sep)
tsk->thread.sysenter_cs = 0;
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
index c7d791f..51e3304 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/kernel/vsyscall_gtod.c
@@ -31,30 +31,30 @@ void update_vsyscall(struct timekeeper *tk)
gtod_write_begin(vdata);
/* copy vsyscall data */
- vdata->vclock_mode = tk->tkr.clock->archdata.vclock_mode;
- vdata->cycle_last = tk->tkr.cycle_last;
- vdata->mask = tk->tkr.mask;
- vdata->mult = tk->tkr.mult;
- vdata->shift = tk->tkr.shift;
+ vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+ vdata->cycle_last = tk->tkr_mono.cycle_last;
+ vdata->mask = tk->tkr_mono.mask;
+ vdata->mult = tk->tkr_mono.mult;
+ vdata->shift = tk->tkr_mono.shift;
vdata->wall_time_sec = tk->xtime_sec;
- vdata->wall_time_snsec = tk->tkr.xtime_nsec;
+ vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
vdata->monotonic_time_sec = tk->xtime_sec
+ tk->wall_to_monotonic.tv_sec;
- vdata->monotonic_time_snsec = tk->tkr.xtime_nsec
+ vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec
+ ((u64)tk->wall_to_monotonic.tv_nsec
- << tk->tkr.shift);
+ << tk->tkr_mono.shift);
while (vdata->monotonic_time_snsec >=
- (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+ (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
vdata->monotonic_time_snsec -=
- ((u64)NSEC_PER_SEC) << tk->tkr.shift;
+ ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
vdata->monotonic_time_sec++;
}
vdata->wall_time_coarse_sec = tk->xtime_sec;
- vdata->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
- tk->tkr.shift);
+ vdata->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift);
vdata->monotonic_time_coarse_sec =
vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
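The tkr_mono rename above is mechanical, but the carry loop it touches is worth a closer look: nanoseconds are kept left-shifted by the clocksource shift, so one full second is NSEC_PER_SEC << shift. A standalone sketch of that normalization, with illustrative values:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned shift = 8;                              /* tkr_mono.shift analogue */
	uint64_t sec   = 100;
	uint64_t snsec = (uint64_t)1500000000 << shift;  /* 1.5 s, kept shifted */

	/* Same carry as the patched update_vsyscall() loop. */
	while (snsec >= (NSEC_PER_SEC << shift)) {
		snsec -= NSEC_PER_SEC << shift;
		sec++;
	}
	printf("sec=%llu nsec=%llu\n",
	       (unsigned long long)sec,
	       (unsigned long long)(snsec >> shift));   /* 101, 500000000 */
	return 0;
}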
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 34f66e5..87a815b 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -342,7 +342,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
config_enabled(CONFIG_IA32_EMULATION));
if (!buf) {
- drop_init_fpu(tsk);
+ fpu_reset_state(tsk);
return 0;
}
@@ -379,7 +379,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
* thread's fpu state, reconstruct fxstate from the fsave
* header. Sanitize the copied state etc.
*/
- struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
+ struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env;
int err = 0;
@@ -393,14 +393,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
*/
drop_fpu(tsk);
- if (__copy_from_user(xsave, buf_fx, state_size) ||
+ if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
__copy_from_user(&env, buf, sizeof(env))) {
+ fpu_finit(fpu);
err = -1;
} else {
sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
- set_used_math();
}
+ set_used_math();
if (use_eager_fpu()) {
preempt_disable();
math_state_restore();
@@ -415,7 +416,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
*/
user_fpu_begin();
if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
- drop_init_fpu(tsk);
+ fpu_reset_state(tsk);
return -1;
}
}
@@ -677,19 +678,13 @@ void xsave_init(void)
this_func();
}
-static inline void __init eager_fpu_init_bp(void)
-{
- current->thread.fpu.state =
- alloc_bootmem_align(xstate_size, __alignof__(struct xsave_struct));
- if (!init_xstate_buf)
- setup_init_fpu_buf();
-}
-
-void eager_fpu_init(void)
+/*
+ * setup_init_fpu_buf() is __init, but it is safe to call it here
+ * because init_xstate_buf is unset only once, early during boot.
+ */
+void __init_refok eager_fpu_init(void)
{
- static __refdata void (*boot_func)(void) = eager_fpu_init_bp;
-
- clear_used_math();
+ WARN_ON(used_math());
current_thread_info()->status = 0;
if (eagerfpu == ENABLE)
@@ -700,21 +695,8 @@ void eager_fpu_init(void)
return;
}
- if (boot_func) {
- boot_func();
- boot_func = NULL;
- }
-
- /*
- * This is same as math_state_restore(). But use_xsave() is
- * not yet patched to use math_state_restore().
- */
- init_fpu(current);
- __thread_fpu_begin(current);
- if (cpu_has_xsave)
- xrstor_state(init_xstate_buf, -1);
- else
- fxrstor_checking(&init_xstate_buf->i387);
+ if (!init_xstate_buf)
+ setup_init_fpu_buf();
}
/*
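The eager_fpu_init() refactoring above replaces the boot_func indirection with a plain once-guard. A minimal sketch of the pattern, with invented names:

#include <stdio.h>
#include <stdlib.h>

static void *init_buf;                 /* init_xstate_buf analogue */

static void setup_init_buf(void)
{
	init_buf = malloc(512);
	puts("buffer allocated once");
}

static void eager_init(void)           /* may run more than once */
{
	if (!init_buf)                 /* true exactly once during boot */
		setup_init_buf();
}

int main(void)
{
	eager_init();
	eager_init();                  /* second call is a no-op */
	free(init_buf);
	return 0;
}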
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 08f790d..16e8f96 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,5 +1,5 @@
-ccflags-y += -Ivirt/kvm -Iarch/x86/kvm
+ccflags-y += -Iarch/x86/kvm
CFLAGS_x86.o := -I.
CFLAGS_svm.o := -I.
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8a80737..59b69f6 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -104,6 +104,9 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
((best->eax & 0xff00) >> 8) != 0)
return -EINVAL;
+ /* Update physical-address width */
+ vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+
kvm_pmu_cpuid_update(vcpu);
return 0;
}
@@ -135,6 +138,21 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
}
}
+int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+ if (!best || best->eax < 0x80000008)
+ goto not_found;
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ if (best)
+ return best->eax & 0xff;
+not_found:
+ return 36;
+}
+EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
+
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
@@ -757,21 +775,6 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
-int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
- if (!best || best->eax < 0x80000008)
- goto not_found;
- best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
- if (best)
- return best->eax & 0xff;
-not_found:
- return 36;
-}
-EXPORT_SYMBOL_GPL(cpuid_maxphyaddr);
-
/*
* If no match is found, check whether we exceed the vCPU's limit
* and return the content of the highest valid _standard_ leaf instead.
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 4452eed..c3b1ad9 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -20,13 +20,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
+
+static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.maxphyaddr;
+}
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
if (!static_cpu_has(X86_FEATURE_XSAVE))
- return 0;
+ return false;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
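The pair of hunks above moves the CPUID walk out of the hot path: kvm_update_cpuid() computes the physical-address width once and cpuid_maxphyaddr() becomes a field read. A minimal sketch of that caching pattern, with invented names and the default of 36 bits taken from the code above:

#include <stdio.h>

struct vcpu { int maxphyaddr; };

static int query_maxphyaddr(void)      /* stands in for the CPUID-leaf walk */
{
	return 36;                     /* default when leaf 0x80000008 is absent */
}

static void update_cpuid(struct vcpu *v)   /* kvm_update_cpuid() analogue */
{
	v->maxphyaddr = query_maxphyaddr();  /* recomputed only on CPUID change */
}

static int cpuid_maxphyaddr(const struct vcpu *v)  /* now O(1) */
{
	return v->maxphyaddr;
}

int main(void)
{
	struct vcpu v;
	update_cpuid(&v);
	printf("MAXPHYADDR = %d\n", cpuid_maxphyaddr(&v));
	return 0;
}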
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 106c015..630bcb0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -248,27 +248,7 @@ struct mode_dual {
struct opcode mode64;
};
-/* EFLAGS bit definitions. */
-#define EFLG_ID (1<<21)
-#define EFLG_VIP (1<<20)
-#define EFLG_VIF (1<<19)
-#define EFLG_AC (1<<18)
-#define EFLG_VM (1<<17)
-#define EFLG_RF (1<<16)
-#define EFLG_IOPL (3<<12)
-#define EFLG_NT (1<<14)
-#define EFLG_OF (1<<11)
-#define EFLG_DF (1<<10)
-#define EFLG_IF (1<<9)
-#define EFLG_TF (1<<8)
-#define EFLG_SF (1<<7)
-#define EFLG_ZF (1<<6)
-#define EFLG_AF (1<<4)
-#define EFLG_PF (1<<2)
-#define EFLG_CF (1<<0)
-
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
-#define EFLG_RESERVED_ONE_MASK 2
enum x86_transfer_type {
X86_TRANSFER_NONE,
@@ -317,7 +297,8 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
* These EFLAGS bits are restored from saved value during emulation, and
* any changes are written back to the saved value after emulation.
*/
-#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
+#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
+ X86_EFLAGS_PF|X86_EFLAGS_CF)
#ifdef CONFIG_X86_64
#define ON64(x) x
@@ -478,6 +459,25 @@ static void assign_masked(ulong *dest, ulong src, ulong mask)
*dest = (*dest & ~mask) | (src & mask);
}
+static void assign_register(unsigned long *reg, u64 val, int bytes)
+{
+ /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
+ switch (bytes) {
+ case 1:
+ *(u8 *)reg = (u8)val;
+ break;
+ case 2:
+ *(u16 *)reg = (u16)val;
+ break;
+ case 4:
+ *reg = (u32)val;
+ break; /* 64b: zero-extend */
+ case 8:
+ *reg = val;
+ break;
+ }
+}
+
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
return (1UL << (ctxt->ad_bytes << 3)) - 1;
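The comment inside assign_register() is easy to miss: only the 4-byte case clears the upper half of a 64-bit register. A standalone demo of that rule (little-endian assumed, as in the emulator; names are illustrative):

#include <stdio.h>
#include <stdint.h>

static void assign(uint64_t *reg, uint64_t val, int bytes)
{
	switch (bytes) {
	case 1: *(uint8_t  *)reg = (uint8_t)val;  break; /* upper bytes kept */
	case 2: *(uint16_t *)reg = (uint16_t)val; break; /* upper bytes kept */
	case 4: *reg = (uint32_t)val;             break; /* zero-extends     */
	case 8: *reg = val;                       break;
	}
}

int main(void)
{
	uint64_t rax = 0xffffffffffffffffULL;

	assign(&rax, 0x12, 2);
	printf("2-byte write: %#llx\n", (unsigned long long)rax); /* ...ffff0012 */
	assign(&rax, 0x12, 4);
	printf("4-byte write: %#llx\n", (unsigned long long)rax); /* 0x12 */
	return 0;
}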
@@ -943,6 +943,22 @@ FASTOP2(xadd);
FASTOP2R(cmp, cmp_r);
+static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
+{
+	/* If src is zero, do not write back, but still update the flags */
+ if (ctxt->src.val == 0)
+ ctxt->dst.type = OP_NONE;
+ return fastop(ctxt, em_bsf);
+}
+
+static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
+{
+	/* If src is zero, do not write back, but still update the flags */
+ if (ctxt->src.val == 0)
+ ctxt->dst.type = OP_NONE;
+ return fastop(ctxt, em_bsr);
+}
+
static u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
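The two wrappers above encode a documented BSF/BSR corner case: with a zero source the flags are updated but the destination is left unmodified, so the writeback is suppressed. A sketch of the same rule outside the emulator (illustrative types; the GCC-style __builtin_ctzll stands in for the fastop):

#include <stdio.h>
#include <stdint.h>

struct result { uint64_t dst; int zf; };

static struct result emulate_bsf(uint64_t dst, uint64_t src)
{
	struct result r = { dst, 0 };

	if (src == 0) {
		r.zf = 1;                            /* flags only, no writeback */
		return r;
	}
	r.dst = (uint64_t)__builtin_ctzll(src);      /* index of lowest set bit */
	return r;
}

int main(void)
{
	struct result r = emulate_bsf(0xdead, 0);
	printf("src=0: ZF=%d dst=%#llx (untouched)\n",
	       r.zf, (unsigned long long)r.dst);
	r = emulate_bsf(0xdead, 0x8);
	printf("src=8: ZF=%d dst=%llu\n", r.zf, (unsigned long long)r.dst);
	return 0;
}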
@@ -1399,7 +1415,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int in_page, n;
unsigned int count = ctxt->rep_prefix ?
address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
- in_page = (ctxt->eflags & EFLG_DF) ?
+ in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
@@ -1412,7 +1428,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
}
if (ctxt->rep_prefix && (ctxt->d & String) &&
- !(ctxt->eflags & EFLG_DF)) {
+ !(ctxt->eflags & X86_EFLAGS_DF)) {
ctxt->dst.data = rc->data + rc->pos;
ctxt->dst.type = OP_MEM_STR;
ctxt->dst.count = (rc->end - rc->pos) / size;
@@ -1691,21 +1707,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
static void write_register_operand(struct operand *op)
{
- /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
- switch (op->bytes) {
- case 1:
- *(u8 *)op->addr.reg = (u8)op->val;
- break;
- case 2:
- *(u16 *)op->addr.reg = (u16)op->val;
- break;
- case 4:
- *op->addr.reg = (u32)op->val;
- break; /* 64b: zero-extend */
- case 8:
- *op->addr.reg = op->val;
- break;
- }
+ return assign_register(op->addr.reg, op->val, op->bytes);
}
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
@@ -1792,32 +1794,34 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
{
int rc;
unsigned long val, change_mask;
- int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
return rc;
- change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
- | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
+ change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
+ X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
+ X86_EFLAGS_AC | X86_EFLAGS_ID;
switch(ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
if (cpl == 0)
- change_mask |= EFLG_IOPL;
+ change_mask |= X86_EFLAGS_IOPL;
if (cpl <= iopl)
- change_mask |= EFLG_IF;
+ change_mask |= X86_EFLAGS_IF;
break;
case X86EMUL_MODE_VM86:
if (iopl < 3)
return emulate_gp(ctxt, 0);
- change_mask |= EFLG_IF;
+ change_mask |= X86_EFLAGS_IF;
break;
default: /* real mode */
- change_mask |= (EFLG_IOPL | EFLG_IF);
+ change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
break;
}
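The change_mask computation above decides which EFLAGS bits a POPF may actually modify, based on mode, CPL and IOPL. A condensed, standalone sketch (only a subset of the bits; the mode handling is a simplified stand-in):

#include <stdio.h>
#include <stdint.h>

#define FL_CF   (1u << 0)
#define FL_IF   (1u << 9)
#define FL_IOPL (3u << 12)
/* remaining arithmetic/status bits elided for brevity */

static uint32_t popf_change_mask(int protected_mode, int cpl, int iopl)
{
	uint32_t mask = FL_CF;         /* always-writable bits (subset shown) */

	if (!protected_mode)           /* real mode: IOPL and IF both writable */
		return mask | FL_IOPL | FL_IF;
	if (cpl == 0)
		mask |= FL_IOPL;       /* only ring 0 may change IOPL */
	if (cpl <= iopl)
		mask |= FL_IF;         /* IF writable when privileged enough */
	return mask;
}

int main(void)
{
	printf("ring3,iopl0: %#x\n", (unsigned)popf_change_mask(1, 3, 0));
	printf("ring0:       %#x\n", (unsigned)popf_change_mask(1, 0, 0));
	printf("real mode:   %#x\n", (unsigned)popf_change_mask(0, 0, 0));
	return 0;
}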
@@ -1918,7 +1922,7 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt)
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
- ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
+ ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
return em_push(ctxt);
}
@@ -1926,6 +1930,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
+ u32 val;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
@@ -1933,9 +1938,10 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
--reg;
}
- rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
+ rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
break;
+ assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
--reg;
}
return rc;
@@ -1956,7 +1962,7 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
if (rc != X86EMUL_CONTINUE)
return rc;
- ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
+ ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
@@ -2022,10 +2028,14 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
unsigned long cs = 0;
- unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
- EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
- EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
- unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
+ unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
+ X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
+ X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
+ X86_EFLAGS_AC | X86_EFLAGS_ID |
+ X86_EFLAGS_FIXED;
+ unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
+ X86_EFLAGS_VIP;
/* TODO: Add stack limit check */
@@ -2054,7 +2064,6 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
ctxt->_eip = temp_eip;
-
if (ctxt->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
else if (ctxt->op_bytes == 2) {
@@ -2063,7 +2072,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
}
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
- ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
+ ctxt->eflags |= X86_EFLAGS_FIXED;
ctxt->ops->set_nmi_mask(ctxt, false);
return rc;
@@ -2145,12 +2154,12 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
- ctxt->eflags &= ~EFLG_ZF;
+ ctxt->eflags &= ~X86_EFLAGS_ZF;
} else {
ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
(u32) reg_read(ctxt, VCPU_REGS_RBX);
- ctxt->eflags |= EFLG_ZF;
+ ctxt->eflags |= X86_EFLAGS_ZF;
}
return X86EMUL_CONTINUE;
}
@@ -2222,7 +2231,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
ctxt->src.val = ctxt->dst.orig_val;
fastop(ctxt, em_cmp);
- if (ctxt->eflags & EFLG_ZF) {
+ if (ctxt->eflags & X86_EFLAGS_ZF) {
/* Success: write back to memory; no update of EAX */
ctxt->src.type = OP_NONE;
ctxt->dst.val = ctxt->src.orig_val;
@@ -2381,14 +2390,14 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~msr_data;
- ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
+ ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
ctxt->_eip = (u32)msr_data;
- ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
+ ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
}
return X86EMUL_CONTINUE;
@@ -2425,8 +2434,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
- ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
- cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
+ ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
+ cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
ss_sel = cs_sel + 8;
if (efer & EFER_LMA) {
cs.d = 0;
@@ -2493,8 +2502,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
return emulate_gp(ctxt, 0);
break;
}
- cs_sel |= SELECTOR_RPL_MASK;
- ss_sel |= SELECTOR_RPL_MASK;
+ cs_sel |= SEGMENT_RPL_MASK;
+ ss_sel |= SEGMENT_RPL_MASK;
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
@@ -2512,7 +2521,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
return false;
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
- iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
return ctxt->ops->cpl(ctxt) > iopl;
}
@@ -2782,10 +2791,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
return ret;
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
- return X86EMUL_CONTINUE;
+ return ret;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
@@ -2954,7 +2961,7 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
struct operand *op)
{
- int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
+ int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
register_address_increment(ctxt, reg, df * op->bytes);
op->addr.mem.ea = register_address(ctxt, reg);
@@ -3323,7 +3330,7 @@ static int em_clts(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static int em_vmcall(struct x86_emulate_ctxt *ctxt)
+static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
int rc = ctxt->ops->fix_hypercall(ctxt);
@@ -3395,17 +3402,6 @@ static int em_lgdt(struct x86_emulate_ctxt *ctxt)
return em_lgdt_lidt(ctxt, true);
}
-static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
-{
- int rc;
-
- rc = ctxt->ops->fix_hypercall(ctxt);
-
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- return rc;
-}
-
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
return em_lgdt_lidt(ctxt, false);
@@ -3504,7 +3500,8 @@ static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
u32 flags;
- flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
+ flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
+ X86_EFLAGS_SF;
flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
ctxt->eflags &= ~0xffUL;
@@ -3769,7 +3766,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
static const struct opcode group7_rm0[] = {
N,
- I(SrcNone | Priv | EmulateOnUD, em_vmcall),
+ I(SrcNone | Priv | EmulateOnUD, em_hypercall),
N, N, N, N, N, N,
};
@@ -3781,7 +3778,7 @@ static const struct opcode group7_rm1[] = {
static const struct opcode group7_rm3[] = {
DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
- II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
+ II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
DIP(SrcNone | Prot | Priv, stgi, check_svme),
@@ -4192,7 +4189,8 @@ static const struct opcode twobyte_table[256] = {
N, N,
G(BitOp, group8),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
- F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
+ I(DstReg | SrcMem | ModRM, em_bsf_c),
+ I(DstReg | SrcMem | ModRM, em_bsr_c),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xC7 */
F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
@@ -4759,9 +4757,9 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
(ctxt->b == 0xae) || (ctxt->b == 0xaf))
&& (((ctxt->rep_prefix == REPE_PREFIX) &&
- ((ctxt->eflags & EFLG_ZF) == 0))
+ ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
- ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
+ ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
return true;
return false;
@@ -4913,7 +4911,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
/* All REP prefixes have the same first termination condition */
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
ctxt->eip = ctxt->_eip;
- ctxt->eflags &= ~EFLG_RF;
+ ctxt->eflags &= ~X86_EFLAGS_RF;
goto done;
}
}
@@ -4963,9 +4961,9 @@ special_insn:
}
if (ctxt->rep_prefix && (ctxt->d & String))
- ctxt->eflags |= EFLG_RF;
+ ctxt->eflags |= X86_EFLAGS_RF;
else
- ctxt->eflags &= ~EFLG_RF;
+ ctxt->eflags &= ~X86_EFLAGS_RF;
if (ctxt->execute) {
if (ctxt->d & Fastop) {
@@ -5014,7 +5012,7 @@ special_insn:
rc = emulate_int(ctxt, ctxt->src.val);
break;
case 0xce: /* into */
- if (ctxt->eflags & EFLG_OF)
+ if (ctxt->eflags & X86_EFLAGS_OF)
rc = emulate_int(ctxt, 4);
break;
case 0xe9: /* jmp rel */
@@ -5027,19 +5025,19 @@ special_insn:
break;
case 0xf5: /* cmc */
/* complement carry flag from eflags reg */
- ctxt->eflags ^= EFLG_CF;
+ ctxt->eflags ^= X86_EFLAGS_CF;
break;
case 0xf8: /* clc */
- ctxt->eflags &= ~EFLG_CF;
+ ctxt->eflags &= ~X86_EFLAGS_CF;
break;
case 0xf9: /* stc */
- ctxt->eflags |= EFLG_CF;
+ ctxt->eflags |= X86_EFLAGS_CF;
break;
case 0xfc: /* cld */
- ctxt->eflags &= ~EFLG_DF;
+ ctxt->eflags &= ~X86_EFLAGS_DF;
break;
case 0xfd: /* std */
- ctxt->eflags |= EFLG_DF;
+ ctxt->eflags |= X86_EFLAGS_DF;
break;
default:
goto cannot_emulate;
@@ -5100,7 +5098,7 @@ writeback:
}
goto done; /* skip rip writeback */
}
- ctxt->eflags &= ~EFLG_RF;
+ ctxt->eflags &= ~X86_EFLAGS_RF;
}
ctxt->eip = ctxt->_eip;
@@ -5137,8 +5135,7 @@ twobyte_insn:
case 0x40 ... 0x4f: /* cmov */
if (test_cc(ctxt->b, ctxt->eflags))
ctxt->dst.val = ctxt->src.val;
- else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
- ctxt->op_bytes != 4)
+ else if (ctxt->op_bytes != 4)
ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x80 ... 0x8f: /* jnz rel, etc*/
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 298781d..4dce6f8 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -443,7 +443,8 @@ static inline int pit_in_range(gpa_t addr)
(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}
-static int pit_ioport_write(struct kvm_io_device *this,
+static int pit_ioport_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
gpa_t addr, int len, const void *data)
{
struct kvm_pit *pit = dev_to_pit(this);
@@ -519,7 +520,8 @@ static int pit_ioport_write(struct kvm_io_device *this,
return 0;
}
-static int pit_ioport_read(struct kvm_io_device *this,
+static int pit_ioport_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
gpa_t addr, int len, void *data)
{
struct kvm_pit *pit = dev_to_pit(this);
@@ -589,7 +591,8 @@ static int pit_ioport_read(struct kvm_io_device *this,
return 0;
}
-static int speaker_ioport_write(struct kvm_io_device *this,
+static int speaker_ioport_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
gpa_t addr, int len, const void *data)
{
struct kvm_pit *pit = speaker_to_pit(this);
@@ -606,8 +609,9 @@ static int speaker_ioport_write(struct kvm_io_device *this,
return 0;
}
-static int speaker_ioport_read(struct kvm_io_device *this,
- gpa_t addr, int len, void *data)
+static int speaker_ioport_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr, int len, void *data)
{
struct kvm_pit *pit = speaker_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index dd1b16b..c84990b 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -3,7 +3,7 @@
#include <linux/kthread.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
struct kvm_kpit_channel_state {
u32 count; /* can be 65536 */
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index cc31f7c..fef922f 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -507,6 +507,7 @@ static int picdev_read(struct kvm_pic *s,
return -EOPNOTSUPP;
if (len != 1) {
+ memset(val, 0, len);
pr_pic_unimpl("non byte read\n");
return 0;
}
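The one-line memset() above is a hardening fix: when the PIC model rejects a non-byte read it still returns success, so without zeroing, the untouched buffer would carry stale stack bytes back to the caller. A sketch of the failure mode (invented names):

#include <stdio.h>
#include <string.h>

static int picdev_read_demo(void *val, int len)
{
	if (len != 1) {
		memset(val, 0, len);           /* don't leak uninitialized bytes */
		return 0;
	}
	*(unsigned char *)val = 0xab;          /* normal one-byte register read */
	return 0;
}

int main(void)
{
	unsigned char buf[4] = { 0xde, 0xad, 0xbe, 0xef }; /* "stale" contents */

	picdev_read_demo(buf, sizeof(buf));
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}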
@@ -528,42 +529,42 @@ static int picdev_read(struct kvm_pic *s,
return 0;
}
-static int picdev_master_write(struct kvm_io_device *dev,
+static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
return picdev_write(container_of(dev, struct kvm_pic, dev_master),
addr, len, val);
}
-static int picdev_master_read(struct kvm_io_device *dev,
+static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
return picdev_read(container_of(dev, struct kvm_pic, dev_master),
addr, len, val);
}
-static int picdev_slave_write(struct kvm_io_device *dev,
+static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
addr, len, val);
}
-static int picdev_slave_read(struct kvm_io_device *dev,
+static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
addr, len, val);
}
-static int picdev_eclr_write(struct kvm_io_device *dev,
+static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
return picdev_write(container_of(dev, struct kvm_pic, dev_eclr),
addr, len, val);
}
-static int picdev_eclr_read(struct kvm_io_device *dev,
+static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
return picdev_read(container_of(dev, struct kvm_pic, dev_eclr),
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index b1947e0..28146f0 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -206,6 +206,8 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
old_irr = ioapic->irr;
ioapic->irr |= mask;
+ if (edge)
+ ioapic->irr_delivered &= ~mask;
if ((edge && old_irr == ioapic->irr) ||
(!edge && entry.fields.remote_irr)) {
ret = 0;
@@ -349,7 +351,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
irqe.shorthand = 0;
if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
- ioapic->irr &= ~(1 << irq);
+ ioapic->irr_delivered |= 1 << irq;
if (irq == RTC_GSI && line_status) {
/*
@@ -422,6 +424,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
int i;
+ struct kvm_lapic *apic = vcpu->arch.apic;
for (i = 0; i < IOAPIC_NUM_PINS; i++) {
union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -443,7 +446,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
spin_lock(&ioapic->lock);
- if (trigger_mode != IOAPIC_LEVEL_TRIG)
+ if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+ kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
continue;
ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
@@ -471,13 +475,6 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
}
}
-bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
-{
- struct kvm_ioapic *ioapic = kvm->arch.vioapic;
- smp_rmb();
- return test_bit(vector, ioapic->handled_vectors);
-}
-
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
@@ -498,8 +495,8 @@ static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
(addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}
-static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
- void *val)
+static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
+ gpa_t addr, int len, void *val)
{
struct kvm_ioapic *ioapic = to_ioapic(this);
u32 result;
@@ -541,8 +538,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
return 0;
}
-static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
- const void *val)
+static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
+ gpa_t addr, int len, const void *val)
{
struct kvm_ioapic *ioapic = to_ioapic(this);
u32 data;
@@ -597,6 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
ioapic->ioregsel = 0;
ioapic->irr = 0;
+ ioapic->irr_delivered = 0;
ioapic->id = 0;
memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
rtc_irq_eoi_tracking_reset(ioapic);
@@ -654,6 +652,7 @@ int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
spin_lock(&ioapic->lock);
memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
+ state->irr &= ~ioapic->irr_delivered;
spin_unlock(&ioapic->lock);
return 0;
}
@@ -667,6 +666,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
spin_lock(&ioapic->lock);
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
ioapic->irr = 0;
+ ioapic->irr_delivered = 0;
update_handled_vectors(ioapic);
kvm_vcpu_request_scan_ioapic(kvm);
kvm_ioapic_inject_all(ioapic, state->irr);
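The new irr_delivered mask changes how edge-triggered interrupts are reported to userspace: the IRR bit stays set after delivery (so coalescing can still be detected), and KVM_GET_IOAPIC subtracts the delivered bits, so a migrated guest does not see the interrupt twice. A minimal sketch with invented types:

#include <stdio.h>
#include <stdint.h>

struct ioapic { uint32_t irr, irr_delivered; };

static void set_edge_irq(struct ioapic *io, int pin)
{
	io->irr |= 1u << pin;
	io->irr_delivered &= ~(1u << pin);     /* new assertion, not yet delivered */
}

static void service_edge_irq(struct ioapic *io, int pin)
{
	io->irr_delivered |= 1u << pin;        /* delivered, but IRR stays set */
}

static uint32_t get_state_irr(const struct ioapic *io)
{
	return io->irr & ~io->irr_delivered;   /* what KVM_GET_IOAPIC reports */
}

int main(void)
{
	struct ioapic io = { 0, 0 };

	set_edge_irq(&io, 3);
	service_edge_irq(&io, 3);
	printf("internal irr=%#x, reported irr=%#x\n",
	       (unsigned)io.irr, (unsigned)get_state_irr(&io));
	return 0;
}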
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index c2e36d9..ca0b0b4 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -3,7 +3,7 @@
#include <linux/kvm_host.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
struct kvm;
struct kvm_vcpu;
@@ -77,6 +77,7 @@ struct kvm_ioapic {
struct rtc_status rtc_status;
struct delayed_work eoi_inject;
u32 irq_eoi[IOAPIC_NUM_PINS];
+ u32 irr_delivered;
};
#ifdef DEBUG
@@ -97,13 +98,19 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
return kvm->arch.vioapic;
}
+static inline bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
+{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ smp_rmb();
+ return test_bit(vector, ioapic->handled_vectors);
+}
+
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, unsigned int dest, int dest_mode);
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
int trigger_mode);
-bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
int kvm_ioapic_init(struct kvm *kvm);
void kvm_ioapic_destroy(struct kvm *kvm);
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 2d03568..ad68c73 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -27,7 +27,7 @@
#include <linux/kvm_host.h>
#include <linux/spinlock.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
#include "ioapic.h"
#include "lapic.h"
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bd4e34d..d67206a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -133,6 +133,28 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
+/* The logical map is definitely wrong if we have multiple
+ * modes at the same time. (Physical map is always right.)
+ */
+static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map)
+{
+ return !(map->mode & (map->mode - 1));
+}
+
+static inline void
+apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid)
+{
+ unsigned lid_bits;
+
+ BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER != 4);
+ BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT != 8);
+ BUILD_BUG_ON(KVM_APIC_MODE_X2APIC != 16);
+ lid_bits = map->mode;
+
+ *cid = dest_id >> lid_bits;
+ *lid = dest_id & ((1 << lid_bits) - 1);
+}
+
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
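The mode encoding above is compact: each APIC addressing mode contributes one bit whose value (4, 8 or 16) is also its logical-ID width, so the map is consistent iff mode is a power of two, which is exactly what !(m & (m - 1)) tests. A standalone sketch:

#include <stdio.h>

#define MODE_XAPIC_CLUSTER 4
#define MODE_XAPIC_FLAT    8
#define MODE_X2APIC       16

static int logical_map_valid(unsigned mode)
{
	return !(mode & (mode - 1));   /* zero or one mode bit set */
}

static void logical_id(unsigned mode, unsigned dest,
		       unsigned *cid, unsigned *lid)
{
	unsigned lid_bits = mode;      /* the bit value doubles as the width */

	*cid = dest >> lid_bits;
	*lid = dest & ((1u << lid_bits) - 1);
}

int main(void)
{
	unsigned cid, lid;

	printf("flat only: valid=%d\n", logical_map_valid(MODE_XAPIC_FLAT));
	printf("flat+x2:   valid=%d\n",
	       logical_map_valid(MODE_XAPIC_FLAT | MODE_X2APIC));
	logical_id(MODE_XAPIC_CLUSTER, 0x3a, &cid, &lid);
	printf("cluster dest 0x3a -> cid=%u lid=%#x\n", cid, lid);
	return 0;
}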
@@ -146,48 +168,6 @@ static void recalculate_apic_map(struct kvm *kvm)
if (!new)
goto out;
- new->ldr_bits = 8;
- /* flat mode is default */
- new->cid_shift = 8;
- new->cid_mask = 0;
- new->lid_mask = 0xff;
- new->broadcast = APIC_BROADCAST;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- struct kvm_lapic *apic = vcpu->arch.apic;
-
- if (!kvm_apic_present(vcpu))
- continue;
-
- if (apic_x2apic_mode(apic)) {
- new->ldr_bits = 32;
- new->cid_shift = 16;
- new->cid_mask = new->lid_mask = 0xffff;
- new->broadcast = X2APIC_BROADCAST;
- } else if (kvm_apic_get_reg(apic, APIC_LDR)) {
- if (kvm_apic_get_reg(apic, APIC_DFR) ==
- APIC_DFR_CLUSTER) {
- new->cid_shift = 4;
- new->cid_mask = 0xf;
- new->lid_mask = 0xf;
- } else {
- new->cid_shift = 8;
- new->cid_mask = 0;
- new->lid_mask = 0xff;
- }
- }
-
- /*
- * All APICs have to be configured in the same mode by an OS.
- * We take advatage of this while building logical id loockup
- * table. After reset APICs are in software disabled mode, so if
- * we find apic with different setting we assume this is the mode
- * OS wants all apics to be in; build lookup table accordingly.
- */
- if (kvm_apic_sw_enabled(apic))
- break;
- }
-
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
u16 cid, lid;
@@ -198,11 +178,25 @@ static void recalculate_apic_map(struct kvm *kvm)
aid = kvm_apic_id(apic);
ldr = kvm_apic_get_reg(apic, APIC_LDR);
- cid = apic_cluster_id(new, ldr);
- lid = apic_logical_id(new, ldr);
if (aid < ARRAY_SIZE(new->phys_map))
new->phys_map[aid] = apic;
+
+ if (apic_x2apic_mode(apic)) {
+ new->mode |= KVM_APIC_MODE_X2APIC;
+ } else if (ldr) {
+ ldr = GET_APIC_LOGICAL_ID(ldr);
+ if (kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
+ new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
+ else
+ new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
+ }
+
+ if (!kvm_apic_logical_map_valid(new))
+ continue;
+
+ apic_logical_id(new, ldr, &cid, &lid);
+
if (lid && cid < ARRAY_SIZE(new->logical_map))
new->logical_map[cid][ffs(lid) - 1] = apic;
}
@@ -588,15 +582,23 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
apic_update_ppr(apic);
}
-static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest)
+static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
- return dest == (apic_x2apic_mode(apic) ?
- X2APIC_BROADCAST : APIC_BROADCAST);
+ if (apic_x2apic_mode(apic))
+ return mda == X2APIC_BROADCAST;
+
+ return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST;
}
-static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest)
+static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
- return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest);
+ if (kvm_apic_broadcast(apic, mda))
+ return true;
+
+ if (apic_x2apic_mode(apic))
+ return mda == kvm_apic_id(apic);
+
+ return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
}
static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
@@ -613,6 +615,7 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
&& (logical_id & mda & 0xffff) != 0;
logical_id = GET_APIC_LOGICAL_ID(logical_id);
+ mda = GET_APIC_DEST_FIELD(mda);
switch (kvm_apic_get_reg(apic, APIC_DFR)) {
case APIC_DFR_FLAT:
@@ -627,10 +630,27 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
}
}
+/* The KVM APIC implementation has two quirks:
+ * - dest always begins at 0 while xAPIC MDA has offset 24,
+ * - IOxAPIC messages have to be delivered (directly) to x2APIC.
+ */
+static u32 kvm_apic_mda(unsigned int dest_id, struct kvm_lapic *source,
+ struct kvm_lapic *target)
+{
+ bool ipi = source != NULL;
+ bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);
+
+ if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda)
+ return X2APIC_BROADCAST;
+
+ return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
+}
+
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, unsigned int dest, int dest_mode)
{
struct kvm_lapic *target = vcpu->arch.apic;
+ u32 mda = kvm_apic_mda(dest, source, target);
apic_debug("target %p, source %p, dest 0x%x, "
"dest_mode 0x%x, short_hand 0x%x\n",
@@ -640,9 +660,9 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
switch (short_hand) {
case APIC_DEST_NOSHORT:
if (dest_mode == APIC_DEST_PHYSICAL)
- return kvm_apic_match_physical_addr(target, dest);
+ return kvm_apic_match_physical_addr(target, mda);
else
- return kvm_apic_match_logical_addr(target, dest);
+ return kvm_apic_match_logical_addr(target, mda);
case APIC_DEST_SELF:
return target == source;
case APIC_DEST_ALLINC:
@@ -664,6 +684,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic **dst;
int i;
bool ret = false;
+ bool x2apic_ipi = src && apic_x2apic_mode(src);
*r = -1;
@@ -675,15 +696,15 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
if (irq->shorthand)
return false;
+ if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
+ return false;
+
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
if (!map)
goto out;
- if (irq->dest_id == map->broadcast)
- goto out;
-
ret = true;
if (irq->dest_mode == APIC_DEST_PHYSICAL) {
@@ -692,16 +713,20 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
dst = &map->phys_map[irq->dest_id];
} else {
- u32 mda = irq->dest_id << (32 - map->ldr_bits);
- u16 cid = apic_cluster_id(map, mda);
+ u16 cid;
+
+ if (!kvm_apic_logical_map_valid(map)) {
+ ret = false;
+ goto out;
+ }
+
+ apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);
if (cid >= ARRAY_SIZE(map->logical_map))
goto out;
dst = map->logical_map[cid];
- bitmap = apic_logical_id(map, mda);
-
if (irq->delivery_mode == APIC_DM_LOWEST) {
int l = -1;
for_each_set_bit(i, &bitmap, 16) {
@@ -833,8 +858,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
- if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
- kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+ if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
int trigger_mode;
if (apic_test_vector(vector, apic->regs + APIC_TMR))
trigger_mode = IOAPIC_LEVEL_TRIG;
@@ -1038,7 +1062,7 @@ static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
addr < apic->base_address + LAPIC_MMIO_LENGTH;
}
-static int apic_mmio_read(struct kvm_io_device *this,
+static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
gpa_t address, int len, void *data)
{
struct kvm_lapic *apic = to_lapic(this);
@@ -1358,7 +1382,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
return ret;
}
-static int apic_mmio_write(struct kvm_io_device *this,
+static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
gpa_t address, int len, const void *data)
{
struct kvm_lapic *apic = to_lapic(this);
@@ -1498,8 +1522,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
return;
}
- if (!kvm_vcpu_is_bsp(apic->vcpu))
- value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
/* update jump label if enable bit changes */
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 0bc6c65..9d28383 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -1,7 +1,7 @@
#ifndef __KVM_X86_LAPIC_H
#define __KVM_X86_LAPIC_H
-#include "iodev.h"
+#include <kvm/iodev.h>
#include <linux/kvm_host.h>
@@ -148,21 +148,6 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
return kvm_x86_ops->vm_has_apicv(kvm);
}
-static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
-{
- u16 cid;
- ldr >>= 32 - map->ldr_bits;
- cid = (ldr >> map->cid_shift) & map->cid_mask;
-
- return cid;
-}
-
-static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
-{
- ldr >>= (32 - map->ldr_bits);
- return ldr & map->lid_mask;
-}
-
static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
{
return vcpu->arch.apic->pending_events;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cee7592..146f295 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4465,6 +4465,79 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
kvm_flush_remote_tlbs(kvm);
}
+static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
+ unsigned long *rmapp)
+{
+ u64 *sptep;
+ struct rmap_iterator iter;
+ int need_tlb_flush = 0;
+ pfn_t pfn;
+ struct kvm_mmu_page *sp;
+
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+ sp = page_header(__pa(sptep));
+ pfn = spte_to_pfn(*sptep);
+
+ /*
+ * Only EPT supported for now; otherwise, one would need to
+ * find out efficiently whether the guest page tables are
+ * also using huge pages.
+ */
+ if (sp->role.direct &&
+ !kvm_is_reserved_pfn(pfn) &&
+ PageTransCompound(pfn_to_page(pfn))) {
+ drop_spte(kvm, sptep);
+ sptep = rmap_get_first(*rmapp, &iter);
+ need_tlb_flush = 1;
+ } else
+ sptep = rmap_get_next(&iter);
+ }
+
+ return need_tlb_flush;
+}
+
+void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
+{
+ bool flush = false;
+ unsigned long *rmapp;
+ unsigned long last_index, index;
+ gfn_t gfn_start, gfn_end;
+
+ spin_lock(&kvm->mmu_lock);
+
+ gfn_start = memslot->base_gfn;
+ gfn_end = memslot->base_gfn + memslot->npages - 1;
+
+ if (gfn_start >= gfn_end)
+ goto out;
+
+ rmapp = memslot->arch.rmap[0];
+ last_index = gfn_to_index(gfn_end, memslot->base_gfn,
+ PT_PAGE_TABLE_LEVEL);
+
+ for (index = 0; index <= last_index; ++index, ++rmapp) {
+ if (*rmapp)
+ flush |= kvm_mmu_zap_collapsible_spte(kvm, rmapp);
+
+ if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ if (flush) {
+ kvm_flush_remote_tlbs(kvm);
+ flush = false;
+ }
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+ }
+
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+}
+
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 8e6b7d8..29fbf9d 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -38,7 +38,7 @@ static struct kvm_arch_event_perf_mapping {
};
/* mapping between fixed pmc index and arch_events array */
-int fixed_pmc_events[] = {1, 0, 7};
+static int fixed_pmc_events[] = {1, 0, 7};
static bool pmc_is_gp(struct kvm_pmc *pmc)
{
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index cc618c8..ce741b8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1261,7 +1261,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
MSR_IA32_APICBASE_ENABLE;
- if (kvm_vcpu_is_bsp(&svm->vcpu))
+ if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
svm_init_osvw(&svm->vcpu);
@@ -1929,14 +1929,12 @@ static int nop_on_interception(struct vcpu_svm *svm)
static int halt_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
- skip_emulated_instruction(&svm->vcpu);
return kvm_emulate_halt(&svm->vcpu);
}
static int vmmcall_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
- skip_emulated_instruction(&svm->vcpu);
kvm_emulate_hypercall(&svm->vcpu);
return 1;
}
@@ -2757,11 +2755,11 @@ static int invlpga_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
- trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
- vcpu->arch.regs[VCPU_REGS_RAX]);
+ trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
+ kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
- kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
+ kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
@@ -2770,12 +2768,18 @@ static int invlpga_interception(struct vcpu_svm *svm)
static int skinit_interception(struct vcpu_svm *svm)
{
- trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
+ trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
+static int wbinvd_interception(struct vcpu_svm *svm)
+{
+ kvm_emulate_wbinvd(&svm->vcpu);
+ return 1;
+}
+
static int xsetbv_interception(struct vcpu_svm *svm)
{
u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
@@ -2902,7 +2906,8 @@ static int rdpmc_interception(struct vcpu_svm *svm)
return 1;
}
-bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
+static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
+ unsigned long val)
{
unsigned long cr0 = svm->vcpu.arch.cr0;
bool ret = false;
@@ -2940,7 +2945,10 @@ static int cr_interception(struct vcpu_svm *svm)
return emulate_on_interception(svm);
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
- cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+ if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
+ cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
+ else
+ cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
err = 0;
if (cr >= 16) { /* mov to cr */
@@ -3133,7 +3141,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
static int rdmsr_interception(struct vcpu_svm *svm)
{
- u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+ u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
u64 data;
if (svm_get_msr(&svm->vcpu, ecx, &data)) {
@@ -3142,8 +3150,8 @@ static int rdmsr_interception(struct vcpu_svm *svm)
} else {
trace_kvm_msr_read(ecx, data);
- svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
- svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
+ kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff);
+ kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
skip_emulated_instruction(&svm->vcpu);
}
@@ -3246,9 +3254,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
static int wrmsr_interception(struct vcpu_svm *svm)
{
struct msr_data msr;
- u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
- u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
- | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+ u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+ u64 data = kvm_read_edx_eax(&svm->vcpu);
msr.data = data;
msr.index = ecx;
@@ -3325,7 +3332,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_CR3] = cr_interception,
[SVM_EXIT_READ_CR4] = cr_interception,
[SVM_EXIT_READ_CR8] = cr_interception,
- [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
+ [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
[SVM_EXIT_WRITE_CR0] = cr_interception,
[SVM_EXIT_WRITE_CR3] = cr_interception,
[SVM_EXIT_WRITE_CR4] = cr_interception,
@@ -3376,7 +3383,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_STGI] = stgi_interception,
[SVM_EXIT_CLGI] = clgi_interception,
[SVM_EXIT_SKINIT] = skinit_interception,
- [SVM_EXIT_WBINVD] = emulate_on_interception,
+ [SVM_EXIT_WBINVD] = wbinvd_interception,
[SVM_EXIT_MONITOR] = monitor_interception,
[SVM_EXIT_MWAIT] = mwait_interception,
[SVM_EXIT_XSETBV] = xsetbv_interception,
@@ -3555,7 +3562,7 @@ static int handle_exit(struct kvm_vcpu *vcpu)
if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
|| !svm_exit_handlers[exit_code]) {
- WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
+ WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f7b20b4..f5e8dce 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2168,7 +2168,10 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
{
unsigned long *msr_bitmap;
- if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
+ if (is_guest_mode(vcpu))
+ msr_bitmap = vmx_msr_bitmap_nested;
+ else if (irqchip_in_kernel(vcpu->kvm) &&
+ apic_x2apic_mode(vcpu->arch.apic)) {
if (is_long_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
else
@@ -2467,6 +2470,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_secondary_ctls_low = 0;
vmx->nested.nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
@@ -2476,8 +2480,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
vmx->nested.nested_vmx_secondary_ctls_high |=
- SECONDARY_EXEC_ENABLE_EPT |
- SECONDARY_EXEC_UNRESTRICTED_GUEST;
+ SECONDARY_EXEC_ENABLE_EPT;
vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_INVEPT_BIT;
@@ -2491,6 +2494,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
} else
vmx->nested.nested_vmx_ept_caps = 0;
+ if (enable_unrestricted_guest)
+ vmx->nested.nested_vmx_secondary_ctls_high |=
+ SECONDARY_EXEC_UNRESTRICTED_GUEST;
+
/* miscellaneous data */
rdmsr(MSR_IA32_VMX_MISC,
vmx->nested.nested_vmx_misc_low,
@@ -3262,8 +3269,8 @@ static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
* default value.
*/
if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
- save->selector &= ~SELECTOR_RPL_MASK;
- save->dpl = save->selector & SELECTOR_RPL_MASK;
+ save->selector &= ~SEGMENT_RPL_MASK;
+ save->dpl = save->selector & SEGMENT_RPL_MASK;
save->s = 1;
}
vmx_set_segment(vcpu, save, seg);
@@ -3836,7 +3843,7 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu)
unsigned int cs_rpl;
vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
- cs_rpl = cs.selector & SELECTOR_RPL_MASK;
+ cs_rpl = cs.selector & SEGMENT_RPL_MASK;
if (cs.unusable)
return false;
@@ -3864,7 +3871,7 @@ static bool stack_segment_valid(struct kvm_vcpu *vcpu)
unsigned int ss_rpl;
vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
- ss_rpl = ss.selector & SELECTOR_RPL_MASK;
+ ss_rpl = ss.selector & SEGMENT_RPL_MASK;
if (ss.unusable)
return true;
@@ -3886,7 +3893,7 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
unsigned int rpl;
vmx_get_segment(vcpu, &var, seg);
- rpl = var.selector & SELECTOR_RPL_MASK;
+ rpl = var.selector & SEGMENT_RPL_MASK;
if (var.unusable)
return true;
@@ -3913,7 +3920,7 @@ static bool tr_valid(struct kvm_vcpu *vcpu)
if (tr.unusable)
return false;
- if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */
+ if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */
return false;
if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
return false;
@@ -3931,7 +3938,7 @@ static bool ldtr_valid(struct kvm_vcpu *vcpu)
if (ldtr.unusable)
return true;
- if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */
+ if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */
return false;
if (ldtr.type != 2)
return false;
@@ -3948,8 +3955,8 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
- return ((cs.selector & SELECTOR_RPL_MASK) ==
- (ss.selector & SELECTOR_RPL_MASK));
+ return ((cs.selector & SEGMENT_RPL_MASK) ==
+ (ss.selector & SEGMENT_RPL_MASK));
}
/*
@@ -4705,7 +4712,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
kvm_set_cr8(&vmx->vcpu, 0);
apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
- if (kvm_vcpu_is_bsp(&vmx->vcpu))
+ if (kvm_vcpu_is_reset_bsp(&vmx->vcpu))
apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
apic_base_msr.host_initiated = true;
kvm_set_apic_base(&vmx->vcpu, &apic_base_msr);
@@ -5000,7 +5007,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
- return kvm_emulate_halt(vcpu);
+ return kvm_vcpu_halt(vcpu);
}
return 1;
}
@@ -5065,6 +5072,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
}
if (is_invalid_opcode(intr_info)) {
+ if (is_guest_mode(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
if (er != EMULATE_DONE)
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5084,9 +5095,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
!(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
- vcpu->run->internal.ndata = 2;
+ vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vect_info;
vcpu->run->internal.data[1] = intr_info;
+ vcpu->run->internal.data[2] = error_code;
return 0;
}
@@ -5527,13 +5539,11 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
static int handle_halt(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
return kvm_emulate_halt(vcpu);
}
static int handle_vmcall(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
kvm_emulate_hypercall(vcpu);
return 1;
}
@@ -5564,7 +5574,6 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
kvm_emulate_wbinvd(vcpu);
return 1;
}
@@ -5822,7 +5831,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
gpa_t gpa;
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
- if (!kvm_io_bus_write(vcpu->kvm, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
+ if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
skip_emulated_instruction(vcpu);
return 1;
}
@@ -5903,7 +5912,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
- ret = kvm_emulate_halt(vcpu);
+ ret = kvm_vcpu_halt(vcpu);
goto out;
}
@@ -7312,21 +7321,21 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
else if (port < 0x10000)
bitmap = vmcs12->io_bitmap_b;
else
- return 1;
+ return true;
bitmap += (port & 0x7fff) / 8;
if (last_bitmap != bitmap)
if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
- return 1;
+ return true;
if (b & (1 << (port & 7)))
- return 1;
+ return true;
port++;
size--;
last_bitmap = bitmap;
}
- return 0;
+ return false;
}
/*
@@ -7342,7 +7351,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
gpa_t bitmap;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
- return 1;
+ return true;
/*
* The MSR_BITMAP page is divided into four 1024-byte bitmaps,
@@ -7361,10 +7370,10 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
if (msr_index < 1024*8) {
unsigned char b;
if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
- return 1;
+ return true;
return 1 & (b >> (msr_index & 7));
} else
- return 1; /* let L1 handle the wrong parameter */
+ return true; /* let L1 handle the wrong parameter */
}
/*
@@ -7386,7 +7395,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
case 0:
if (vmcs12->cr0_guest_host_mask &
(val ^ vmcs12->cr0_read_shadow))
- return 1;
+ return true;
break;
case 3:
if ((vmcs12->cr3_target_count >= 1 &&
@@ -7397,37 +7406,37 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
vmcs12->cr3_target_value2 == val) ||
(vmcs12->cr3_target_count >= 4 &&
vmcs12->cr3_target_value3 == val))
- return 0;
+ return false;
if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
- return 1;
+ return true;
break;
case 4:
if (vmcs12->cr4_guest_host_mask &
(vmcs12->cr4_read_shadow ^ val))
- return 1;
+ return true;
break;
case 8:
if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
- return 1;
+ return true;
break;
}
break;
case 2: /* clts */
if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
(vmcs12->cr0_read_shadow & X86_CR0_TS))
- return 1;
+ return true;
break;
case 1: /* mov from cr */
switch (cr) {
case 3:
if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR3_STORE_EXITING)
- return 1;
+ return true;
break;
case 8:
if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR8_STORE_EXITING)
- return 1;
+ return true;
break;
}
break;
@@ -7438,14 +7447,14 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
*/
if (vmcs12->cr0_guest_host_mask & 0xe &
(val ^ vmcs12->cr0_read_shadow))
- return 1;
+ return true;
if ((vmcs12->cr0_guest_host_mask & 0x1) &&
!(vmcs12->cr0_read_shadow & 0x1) &&
(val & 0x1))
- return 1;
+ return true;
break;
}
- return 0;
+ return false;
}
/*
@@ -7468,48 +7477,48 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
KVM_ISA_VMX);
if (vmx->nested.nested_run_pending)
- return 0;
+ return false;
if (unlikely(vmx->fail)) {
pr_info_ratelimited("%s failed vm entry %x\n", __func__,
vmcs_read32(VM_INSTRUCTION_ERROR));
- return 1;
+ return true;
}
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
if (!is_exception(intr_info))
- return 0;
+ return false;
else if (is_page_fault(intr_info))
return enable_ept;
else if (is_no_device(intr_info) &&
!(vmcs12->guest_cr0 & X86_CR0_TS))
- return 0;
+ return false;
return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
case EXIT_REASON_EXTERNAL_INTERRUPT:
- return 0;
+ return false;
case EXIT_REASON_TRIPLE_FAULT:
- return 1;
+ return true;
case EXIT_REASON_PENDING_INTERRUPT:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
case EXIT_REASON_NMI_WINDOW:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
case EXIT_REASON_TASK_SWITCH:
- return 1;
+ return true;
case EXIT_REASON_CPUID:
if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
- return 0;
- return 1;
+ return false;
+ return true;
case EXIT_REASON_HLT:
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
case EXIT_REASON_INVD:
- return 1;
+ return true;
case EXIT_REASON_INVLPG:
return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
case EXIT_REASON_RDPMC:
return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
- case EXIT_REASON_RDTSC:
+ case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
@@ -7521,7 +7530,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* VMX instructions trap unconditionally. This allows L1 to
* emulate them for its L2 guest, i.e., allows 3-level nesting!
*/
- return 1;
+ return true;
case EXIT_REASON_CR_ACCESS:
return nested_vmx_exit_handled_cr(vcpu, vmcs12);
case EXIT_REASON_DR_ACCESS:
@@ -7532,7 +7541,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_MSR_WRITE:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
case EXIT_REASON_INVALID_STATE:
- return 1;
+ return true;
case EXIT_REASON_MWAIT_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
case EXIT_REASON_MONITOR_INSTRUCTION:
@@ -7542,7 +7551,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
nested_cpu_has2(vmcs12,
SECONDARY_EXEC_PAUSE_LOOP_EXITING);
case EXIT_REASON_MCE_DURING_VMENTRY:
- return 0;
+ return false;
case EXIT_REASON_TPR_BELOW_THRESHOLD:
return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
case EXIT_REASON_APIC_ACCESS:
@@ -7551,7 +7560,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_APIC_WRITE:
case EXIT_REASON_EOI_INDUCED:
/* apic_write and eoi_induced should exit unconditionally. */
- return 1;
+ return true;
case EXIT_REASON_EPT_VIOLATION:
/*
* L0 always deals with the EPT violation. If nested EPT is
@@ -7559,7 +7568,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* missing in the guest EPT table (EPT12), the EPT violation
* will be injected with nested_ept_inject_page_fault()
*/
- return 0;
+ return false;
case EXIT_REASON_EPT_MISCONFIG:
/*
* L2 never uses directly L1's EPT, but rather L0's own EPT
@@ -7567,11 +7576,11 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* (EPT on EPT). So any problems with the structure of the
* table is L0's fault.
*/
- return 0;
+ return false;
case EXIT_REASON_WBINVD:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV:
- return 1;
+ return true;
case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
/*
* This should never happen, since it is not possible to
@@ -7581,7 +7590,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
*/
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
default:
- return 1;
+ return true;
}
}
@@ -8516,6 +8525,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
exec_control);
}
}
+ if (nested && !vmx->rdtscp_enabled)
+ vmx->nested.nested_vmx_secondary_ctls_high &=
+ ~SECONDARY_EXEC_RDTSCP;
}
/* Exposing INVPCID only when PCID is exposed */
@@ -8616,10 +8628,11 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int maxphyaddr = cpuid_maxphyaddr(vcpu);
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
- /* TODO: Also verify bits beyond physical address width are 0 */
- if (!PAGE_ALIGNED(vmcs12->apic_access_addr))
+ if (!PAGE_ALIGNED(vmcs12->apic_access_addr) ||
+ vmcs12->apic_access_addr >> maxphyaddr)
return false;
/*
@@ -8635,8 +8648,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
}
if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
- /* TODO: Also verify bits beyond physical address width are 0 */
- if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr))
+ if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr) ||
+ vmcs12->virtual_apic_page_addr >> maxphyaddr)
return false;
if (vmx->nested.virtual_apic_page) /* shouldn't happen */
@@ -8659,7 +8672,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
}
if (nested_cpu_has_posted_intr(vmcs12)) {
- if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64))
+ if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64) ||
+ vmcs12->posted_intr_desc_addr >> maxphyaddr)
return false;
if (vmx->nested.pi_desc_page) { /* shouldn't happen */
@@ -8858,9 +8872,9 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
unsigned long count_field,
- unsigned long addr_field,
- int maxphyaddr)
+ unsigned long addr_field)
{
+ int maxphyaddr;
u64 count, addr;
if (vmcs12_read_any(vcpu, count_field, &count) ||
@@ -8870,6 +8884,7 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
}
if (count == 0)
return 0;
+ maxphyaddr = cpuid_maxphyaddr(vcpu);
if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
(addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
pr_warn_ratelimited(
@@ -8883,19 +8898,16 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
- int maxphyaddr;
-
if (vmcs12->vm_exit_msr_load_count == 0 &&
vmcs12->vm_exit_msr_store_count == 0 &&
vmcs12->vm_entry_msr_load_count == 0)
return 0; /* Fast path */
- maxphyaddr = cpuid_maxphyaddr(vcpu);
if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
- VM_EXIT_MSR_LOAD_ADDR, maxphyaddr) ||
+ VM_EXIT_MSR_LOAD_ADDR) ||
nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
- VM_EXIT_MSR_STORE_ADDR, maxphyaddr) ||
+ VM_EXIT_MSR_STORE_ADDR) ||
nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
- VM_ENTRY_MSR_LOAD_ADDR, maxphyaddr))
+ VM_ENTRY_MSR_LOAD_ADDR))
return -EINVAL;
return 0;
}
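The addr >> maxphyaddr pattern used in these checks rejects any guest-physical address with bits set above the CPU's reported physical-address width. A hedged stand-alone sketch of the full MSR switch-area check (in real code maxphyaddr comes from CPUID, via cpuid_maxphyaddr()):

#include <stdbool.h>
#include <stdint.h>

/* Reject MSR switch areas that are misaligned or that set address bits
 * above the physical address width, mirroring the check above. */
static bool msr_area_valid(uint64_t addr, uint64_t count, int maxphyaddr)
{
        const uint64_t entry_size = 16; /* sizeof(struct vmx_msr_entry) */

        if (addr & (entry_size - 1))            /* 16-byte alignment */
                return false;
        if (addr >> maxphyaddr)                 /* start beyond width */
                return false;
        if ((addr + count * entry_size - 1) >> maxphyaddr)
                return false;                   /* end beyond width */
        return true;
}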
@@ -9145,8 +9157,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
exec_control &= ~SECONDARY_EXEC_RDTSCP;
/* Take the following fields only from vmcs12 */
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
- SECONDARY_EXEC_APIC_REGISTER_VIRT);
+ SECONDARY_EXEC_APIC_REGISTER_VIRT);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
exec_control |= vmcs12->secondary_vm_exec_control;
@@ -9218,9 +9231,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
}
if (cpu_has_vmx_msr_bitmap() &&
- exec_control & CPU_BASED_USE_MSR_BITMAPS &&
- nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) {
- vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested));
+ exec_control & CPU_BASED_USE_MSR_BITMAPS) {
+ nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
+ /* MSR_BITMAP will be set by the following vmx_set_efer() call. */
} else
exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
@@ -9379,7 +9392,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
}
if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
- /*TODO: Also verify bits beyond physical address width are 0*/
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
@@ -9518,7 +9530,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmcs12->launch_state = 1;
if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
- return kvm_emulate_halt(vcpu);
+ return kvm_vcpu_halt(vcpu);
vmx->nested.nested_run_pending = 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd7a70b..e1a8126 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -801,6 +801,17 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
+static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+ for (i = 0; i < KVM_NR_DB_REGS; i++)
+ vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+ }
+}
+
static void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
@@ -1070,19 +1081,19 @@ static void update_pvclock_gtod(struct timekeeper *tk)
struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
u64 boot_ns;
- boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
+ boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
write_seqcount_begin(&vdata->seq);
/* copy pvclock gtod data */
- vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode;
- vdata->clock.cycle_last = tk->tkr.cycle_last;
- vdata->clock.mask = tk->tkr.mask;
- vdata->clock.mult = tk->tkr.mult;
- vdata->clock.shift = tk->tkr.shift;
+ vdata->clock.vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+ vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
+ vdata->clock.mask = tk->tkr_mono.mask;
+ vdata->clock.mult = tk->tkr_mono.mult;
+ vdata->clock.shift = tk->tkr_mono.shift;
vdata->boot_ns = boot_ns;
- vdata->nsec_base = tk->tkr.xtime_nsec;
+ vdata->nsec_base = tk->tkr_mono.xtime_nsec;
write_seqcount_end(&vdata->seq);
}
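The write_seqcount_begin/end pair above is the writer half of a seqcount; the vDSO reader retries until it observes an even, unchanged sequence. A simplified reader-side sketch using C11 atomics (not the kernel's exact API, and the plain struct copy is a simplification of what a real lock-free reader must do):

#include <stdatomic.h>
#include <stdint.h>

struct snapshot { uint64_t boot_ns, nsec_base; };

/* Reader side of the seqcount protocol used by update_pvclock_gtod():
 * sample the sequence, copy the data, and retry if a writer was active
 * (odd count) or ran concurrently (count changed). */
static struct snapshot read_gtod(atomic_uint *seq, const struct snapshot *data)
{
        struct snapshot s;
        unsigned int start;

        for (;;) {
                start = atomic_load_explicit(seq, memory_order_acquire);
                if (start & 1)
                        continue;               /* writer in progress */
                s = *data;
                atomic_thread_fence(memory_order_acquire);
                if (atomic_load_explicit(seq, memory_order_relaxed) == start)
                        return s;               /* consistent snapshot */
        }
}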
@@ -2744,7 +2755,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_USER_NMI:
case KVM_CAP_REINJECT_CONTROL:
case KVM_CAP_IRQ_INJECT_STATUS:
- case KVM_CAP_IRQFD:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_IOEVENTFD_NO_LENGTH:
case KVM_CAP_PIT2:
@@ -3150,6 +3160,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
return -EINVAL;
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+ kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = dbgregs->dr6;
kvm_update_dr6(vcpu);
vcpu->arch.dr7 = dbgregs->dr7;
@@ -4115,8 +4126,8 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
- !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
- && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
+ !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
+ && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
break;
handled += n;
addr += n;
@@ -4135,8 +4146,9 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
- !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
- && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
+ !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
+ addr, n, v))
+ && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
break;
trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
handled += n;
@@ -4476,7 +4488,8 @@ mmio:
return X86EMUL_CONTINUE;
}
-int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
+static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr,
void *val, unsigned int bytes,
struct x86_exception *exception,
const struct read_write_emulator_ops *ops)
@@ -4539,7 +4552,7 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
exception, &read_emultor);
}
-int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
+static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
const void *val,
unsigned int bytes,
@@ -4630,10 +4643,10 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
int r;
if (vcpu->arch.pio.in)
- r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
+ r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
- r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
+ r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
vcpu->arch.pio.port, vcpu->arch.pio.size,
pd);
return r;
@@ -4706,7 +4719,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
}
-int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
{
if (!need_emulate_wbinvd(vcpu))
return X86EMUL_CONTINUE;
@@ -4723,19 +4736,29 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
wbinvd();
return X86EMUL_CONTINUE;
}
+
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ return kvm_emulate_wbinvd_noskip(vcpu);
+}
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
+
+
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
- kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
+ kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
}
-int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
+static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
+ unsigned long *dest)
{
return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
}
-int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
+static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
+ unsigned long value)
{
return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
@@ -5817,7 +5840,7 @@ void kvm_arch_exit(void)
free_percpu(shared_msrs);
}
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
if (irqchip_in_kernel(vcpu->kvm)) {
@@ -5828,6 +5851,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
return 0;
}
}
+EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ return kvm_vcpu_halt(vcpu);
+}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
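The halt and wbinvd changes above follow one refactoring pattern: keep a *_noskip helper that does the actual work, and let the exported emulation entry point advance the guest RIP first. The instruction emulator keeps calling the _noskip variant because emulation advances RIP itself. A schematic of the shape (the types and function pointer here stand in for kvm_x86_ops, they are not the real definitions):

struct vcpu;    /* opaque; only passed through in this sketch */

struct x86_ops {
        void (*skip_emulated_instruction)(struct vcpu *v);
};

static int op_noskip(struct vcpu *v)
{
        (void)v;
        /* ...do the actual work (halt the vcpu, execute wbinvd, ...) */
        return 1;
}

static int op_emulate(struct vcpu *v, struct x86_ops *ops)
{
        ops->skip_emulated_instruction(v);      /* move RIP past the insn */
        return op_noskip(v);
}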
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
@@ -5904,7 +5934,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
lapic_irq.dest_id = apicid;
lapic_irq.delivery_mode = APIC_DM_REMRD;
- kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL);
+ kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
}
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
@@ -5912,6 +5942,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
unsigned long nr, a0, a1, a2, a3, ret;
int op_64_bit, r = 1;
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+
if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);
@@ -6165,7 +6197,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
}
/*
- * Returns 1 to let __vcpu_run() continue the guest execution loop without
+ * Returns 1 to let vcpu_run() continue the guest execution loop without
* exiting to the userspace. Otherwise, the value will be returned to the
* userspace.
*/
@@ -6302,6 +6334,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3);
set_debugreg(vcpu->arch.dr6, 6);
+ vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
}
trace_kvm_entry(vcpu->vcpu_id);
@@ -6383,42 +6416,47 @@ out:
return r;
}
+static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
+{
+ if (!kvm_arch_vcpu_runnable(vcpu)) {
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_block(vcpu);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
+ return 1;
+ }
+
+ kvm_apic_accept_events(vcpu);
+ switch (vcpu->arch.mp_state) {
+ case KVM_MP_STATE_HALTED:
+ vcpu->arch.pv.pv_unhalted = false;
+ vcpu->arch.mp_state =
+ KVM_MP_STATE_RUNNABLE;
+ case KVM_MP_STATE_RUNNABLE:
+ vcpu->arch.apf.halted = false;
+ break;
+ case KVM_MP_STATE_INIT_RECEIVED:
+ break;
+ default:
+ return -EINTR;
+ }
+ return 1;
+}
-static int __vcpu_run(struct kvm_vcpu *vcpu)
+static int vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
struct kvm *kvm = vcpu->kvm;
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- r = 1;
- while (r > 0) {
+ for (;;) {
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
r = vcpu_enter_guest(vcpu);
- else {
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- kvm_vcpu_block(vcpu);
- vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
- kvm_apic_accept_events(vcpu);
- switch(vcpu->arch.mp_state) {
- case KVM_MP_STATE_HALTED:
- vcpu->arch.pv.pv_unhalted = false;
- vcpu->arch.mp_state =
- KVM_MP_STATE_RUNNABLE;
- case KVM_MP_STATE_RUNNABLE:
- vcpu->arch.apf.halted = false;
- break;
- case KVM_MP_STATE_INIT_RECEIVED:
- break;
- default:
- r = -EINTR;
- break;
- }
- }
- }
-
+ else
+ r = vcpu_block(kvm, vcpu);
if (r <= 0)
break;
@@ -6430,6 +6468,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.request_irq_exits;
+ break;
}
kvm_check_async_pf_completion(vcpu);
@@ -6438,6 +6477,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.signal_exits;
+ break;
}
if (need_resched()) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
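The restructured loop above replaces the `while (r > 0)` pattern with an explicit for (;;) plus early breaks, hoisting the blocking path into vcpu_block(). Reduced to its control flow, with stub helpers standing in for the KVM internals (a sketch, not the real code):

/* Skeleton of the new vcpu_run()/vcpu_block() split. runnable(),
 * enter_guest() and block() are stand-ins for the real KVM helpers;
 * r <= 0 means "return to userspace with this value". */
static int runnable(void)            { return 1; }
static int enter_guest(void)         { return 0; } /* pretend exit to user */
static int block(void)               { return 1; }
static int signal_pending_stub(void) { return 0; }

static int vcpu_run_skeleton(void)
{
        int r;

        for (;;) {
                if (runnable())
                        r = enter_guest();      /* vcpu_enter_guest() */
                else
                        r = block();            /* vcpu_block() */
                if (r <= 0)
                        break;
                if (signal_pending_stub()) {
                        r = -4;                 /* -EINTR */
                        break;
                }
        }
        return r;
}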
@@ -6569,7 +6609,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
} else
WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
- r = __vcpu_run(vcpu);
+ r = vcpu_run(vcpu);
out:
post_kvm_run_save(vcpu);
@@ -7076,11 +7116,14 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_clear_exception_queue(vcpu);
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
+ kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = DR6_INIT;
kvm_update_dr6(vcpu);
vcpu->arch.dr7 = DR7_FIXED_1;
kvm_update_dr7(vcpu);
+ vcpu->arch.cr2 = 0;
+
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
vcpu->arch.st.msr_val = 0;
@@ -7241,7 +7284,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
- if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
+ if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -7289,6 +7332,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.guest_supported_xcr0 = 0;
vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
+ vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
@@ -7429,7 +7474,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
- kvm_kvfree(free->arch.rmap[i]);
+ kvfree(free->arch.rmap[i]);
free->arch.rmap[i] = NULL;
}
if (i == 0)
@@ -7437,7 +7482,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
if (!dont || free->arch.lpage_info[i - 1] !=
dont->arch.lpage_info[i - 1]) {
- kvm_kvfree(free->arch.lpage_info[i - 1]);
+ kvfree(free->arch.lpage_info[i - 1]);
free->arch.lpage_info[i - 1] = NULL;
}
}
@@ -7491,12 +7536,12 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
out_free:
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
- kvm_kvfree(slot->arch.rmap[i]);
+ kvfree(slot->arch.rmap[i]);
slot->arch.rmap[i] = NULL;
if (i == 0)
continue;
- kvm_kvfree(slot->arch.lpage_info[i - 1]);
+ kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
return -ENOMEM;
@@ -7619,6 +7664,23 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
new = id_to_memslot(kvm->memslots, mem->slot);
/*
+ * Dirty logging tracks sptes in 4k granularity, meaning that large
+ * sptes have to be split. If live migration is successful, the guest
+ * in the source machine will be destroyed and large sptes will be
+ * created in the destination. However, if the guest continues to run
+ * in the source machine (for example if live migration fails), small
+ * sptes will remain around and cause bad performance.
+ *
+ * Scan sptes if dirty logging has been stopped, dropping those
+ * which can be collapsed into a single large-page spte. Later
+ * page faults will create the large-page sptes.
+ */
+ if ((change != KVM_MR_DELETE) &&
+ (old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
+ !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+ kvm_mmu_zap_collapsible_sptes(kvm, new);
+
+ /*
* Set up write protection and/or dirty logging for the new slot.
*
* For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index ac4453d..717908b 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -868,7 +868,8 @@ static void __init lguest_init_IRQ(void)
/* Some systems map "vectors" to interrupts weirdly. Not us! */
__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
if (i != SYSCALL_VECTOR)
- set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
+ set_intr_gate(i, irq_entries_start +
+ 8 * (i - FIRST_EXTERNAL_VECTOR));
}
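The lguest change relies on the interrupt entry stubs being emitted as a dense array starting at irq_entries_start, 8 bytes per vector, so the stub address can be computed instead of looked up in a table. The arithmetic, as a sketch:

#include <stdint.h>

/* Each external vector gets an 8-byte entry stub, so the stub for
 * vector v lives at base + 8 * (v - FIRST_EXTERNAL_VECTOR). The base
 * address would come from the kernel image; nothing here is a real
 * runtime value. */
#define FIRST_EXTERNAL_VECTOR 0x20

static uintptr_t stub_addr(uintptr_t irq_entries_start, unsigned int vector)
{
        return irq_entries_start + 8u * (vector - FIRST_EXTERNAL_VECTOR);
}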
/*
@@ -1076,6 +1077,7 @@ static void lguest_load_sp0(struct tss_struct *tss,
{
lazy_hcall3(LHCALL_SET_STACK, __KERNEL_DS | 0x1, thread->sp0,
THREAD_SIZE / PAGE_SIZE);
+ tss->x86_tss.sp0 = thread->sp0;
}
/* Let's just say, I wouldn't do debugging under a Guest. */
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index f5cc9eb..082a851 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -13,16 +13,6 @@
#include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
-.macro SAVE reg
- pushl_cfi %\reg
- CFI_REL_OFFSET \reg, 0
-.endm
-
-.macro RESTORE reg
- popl_cfi %\reg
- CFI_RESTORE \reg
-.endm
-
.macro read64 reg
movl %ebx, %eax
movl %ecx, %edx
@@ -67,10 +57,10 @@ ENDPROC(atomic64_xchg_cx8)
.macro addsub_return func ins insc
ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC
- SAVE ebp
- SAVE ebx
- SAVE esi
- SAVE edi
+ pushl_cfi_reg ebp
+ pushl_cfi_reg ebx
+ pushl_cfi_reg esi
+ pushl_cfi_reg edi
movl %eax, %esi
movl %edx, %edi
@@ -89,10 +79,10 @@ ENTRY(atomic64_\func\()_return_cx8)
10:
movl %ebx, %eax
movl %ecx, %edx
- RESTORE edi
- RESTORE esi
- RESTORE ebx
- RESTORE ebp
+ popl_cfi_reg edi
+ popl_cfi_reg esi
+ popl_cfi_reg ebx
+ popl_cfi_reg ebp
ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8)
@@ -104,7 +94,7 @@ addsub_return sub sub sbb
.macro incdec_return func ins insc
ENTRY(atomic64_\func\()_return_cx8)
CFI_STARTPROC
- SAVE ebx
+ pushl_cfi_reg ebx
read64 %esi
1:
@@ -119,7 +109,7 @@ ENTRY(atomic64_\func\()_return_cx8)
10:
movl %ebx, %eax
movl %ecx, %edx
- RESTORE ebx
+ popl_cfi_reg ebx
ret
CFI_ENDPROC
ENDPROC(atomic64_\func\()_return_cx8)
@@ -130,7 +120,7 @@ incdec_return dec sub sbb
ENTRY(atomic64_dec_if_positive_cx8)
CFI_STARTPROC
- SAVE ebx
+ pushl_cfi_reg ebx
read64 %esi
1:
@@ -146,18 +136,18 @@ ENTRY(atomic64_dec_if_positive_cx8)
2:
movl %ebx, %eax
movl %ecx, %edx
- RESTORE ebx
+ popl_cfi_reg ebx
ret
CFI_ENDPROC
ENDPROC(atomic64_dec_if_positive_cx8)
ENTRY(atomic64_add_unless_cx8)
CFI_STARTPROC
- SAVE ebp
- SAVE ebx
+ pushl_cfi_reg ebp
+ pushl_cfi_reg ebx
/* these just push these two parameters on the stack */
- SAVE edi
- SAVE ecx
+ pushl_cfi_reg edi
+ pushl_cfi_reg ecx
movl %eax, %ebp
movl %edx, %edi
@@ -179,8 +169,8 @@ ENTRY(atomic64_add_unless_cx8)
3:
addl $8, %esp
CFI_ADJUST_CFA_OFFSET -8
- RESTORE ebx
- RESTORE ebp
+ popl_cfi_reg ebx
+ popl_cfi_reg ebp
ret
4:
cmpl %edx, 4(%esp)
@@ -192,7 +182,7 @@ ENDPROC(atomic64_add_unless_cx8)
ENTRY(atomic64_inc_not_zero_cx8)
CFI_STARTPROC
- SAVE ebx
+ pushl_cfi_reg ebx
read64 %esi
1:
@@ -209,7 +199,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
movl $1, %eax
3:
- RESTORE ebx
+ popl_cfi_reg ebx
ret
CFI_ENDPROC
ENDPROC(atomic64_inc_not_zero_cx8)
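All of these atomic64 routines share one shape on 32-bit x86: load the current 64-bit value, compute the new one in registers, and retry with cmpxchg8b until no other CPU intervened. The same loop written portably with C11 atomics (a sketch of the technique, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Equivalent of atomic64_add_unless_cx8: add a to *v unless it equals u.
 * compare_exchange_weak reloads 'old' on failure, so each retry works
 * on fresh data, just like the cmpxchg8b loop in the assembly. */
static bool atomic64_add_unless(_Atomic int64_t *v, int64_t a, int64_t u)
{
        int64_t old = atomic_load(v);

        do {
                if (old == u)
                        return false;
        } while (!atomic_compare_exchange_weak(v, &old, old + a));
        return true;
}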
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index e78b8ee..9bc944a 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -51,10 +51,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
*/
ENTRY(csum_partial)
CFI_STARTPROC
- pushl_cfi %esi
- CFI_REL_OFFSET esi, 0
- pushl_cfi %ebx
- CFI_REL_OFFSET ebx, 0
+ pushl_cfi_reg esi
+ pushl_cfi_reg ebx
movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: unsigned char *buff
@@ -127,14 +125,12 @@ ENTRY(csum_partial)
6: addl %ecx,%eax
adcl $0, %eax
7:
- testl $1, 12(%esp)
+ testb $1, 12(%esp)
jz 8f
roll $8, %eax
8:
- popl_cfi %ebx
- CFI_RESTORE ebx
- popl_cfi %esi
- CFI_RESTORE esi
+ popl_cfi_reg ebx
+ popl_cfi_reg esi
ret
CFI_ENDPROC
ENDPROC(csum_partial)
@@ -145,10 +141,8 @@ ENDPROC(csum_partial)
ENTRY(csum_partial)
CFI_STARTPROC
- pushl_cfi %esi
- CFI_REL_OFFSET esi, 0
- pushl_cfi %ebx
- CFI_REL_OFFSET ebx, 0
+ pushl_cfi_reg esi
+ pushl_cfi_reg ebx
movl 20(%esp),%eax # Function arg: unsigned int sum
movl 16(%esp),%ecx # Function arg: int len
movl 12(%esp),%esi # Function arg: const unsigned char *buf
@@ -251,14 +245,12 @@ ENTRY(csum_partial)
addl %ebx,%eax
adcl $0,%eax
80:
- testl $1, 12(%esp)
+ testb $1, 12(%esp)
jz 90f
roll $8, %eax
90:
- popl_cfi %ebx
- CFI_RESTORE ebx
- popl_cfi %esi
- CFI_RESTORE esi
+ popl_cfi_reg ebx
+ popl_cfi_reg esi
ret
CFI_ENDPROC
ENDPROC(csum_partial)
@@ -298,12 +290,9 @@ ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
subl $4,%esp
CFI_ADJUST_CFA_OFFSET 4
- pushl_cfi %edi
- CFI_REL_OFFSET edi, 0
- pushl_cfi %esi
- CFI_REL_OFFSET esi, 0
- pushl_cfi %ebx
- CFI_REL_OFFSET ebx, 0
+ pushl_cfi_reg edi
+ pushl_cfi_reg esi
+ pushl_cfi_reg ebx
movl ARGBASE+16(%esp),%eax # sum
movl ARGBASE+12(%esp),%ecx # len
movl ARGBASE+4(%esp),%esi # src
@@ -412,12 +401,9 @@ DST( movb %cl, (%edi) )
.previous
- popl_cfi %ebx
- CFI_RESTORE ebx
- popl_cfi %esi
- CFI_RESTORE esi
- popl_cfi %edi
- CFI_RESTORE edi
+ popl_cfi_reg ebx
+ popl_cfi_reg esi
+ popl_cfi_reg edi
popl_cfi %ecx # equivalent to addl $4,%esp
ret
CFI_ENDPROC
@@ -441,12 +427,9 @@ ENDPROC(csum_partial_copy_generic)
ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
- pushl_cfi %ebx
- CFI_REL_OFFSET ebx, 0
- pushl_cfi %edi
- CFI_REL_OFFSET edi, 0
- pushl_cfi %esi
- CFI_REL_OFFSET esi, 0
+ pushl_cfi_reg ebx
+ pushl_cfi_reg edi
+ pushl_cfi_reg esi
movl ARGBASE+4(%esp),%esi #src
movl ARGBASE+8(%esp),%edi #dst
movl ARGBASE+12(%esp),%ecx #len
@@ -506,12 +489,9 @@ DST( movb %dl, (%edi) )
jmp 7b
.previous
- popl_cfi %esi
- CFI_RESTORE esi
- popl_cfi %edi
- CFI_RESTORE edi
- popl_cfi %ebx
- CFI_RESTORE ebx
+ popl_cfi_reg esi
+ popl_cfi_reg edi
+ popl_cfi_reg ebx
ret
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic)
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index f2145cf..e67e579 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,31 +1,35 @@
#include <linux/linkage.h>
#include <asm/dwarf2.h>
+#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
/*
- * Zero a page.
- * rdi page
- */
-ENTRY(clear_page_c)
+ * Most CPUs support the enhanced REP MOVSB/STOSB instructions. It is
+ * recommended to use them when possible, and we do use them by default.
+ * If enhanced REP MOVSB/STOSB is not available, try to use fast string.
+ * Otherwise, use original.
+ */
+
+/*
+ * Zero a page.
+ * %rdi - page
+ */
+ENTRY(clear_page)
CFI_STARTPROC
+
+ ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
+ "jmp clear_page_c_e", X86_FEATURE_ERMS
+
movl $4096/8,%ecx
xorl %eax,%eax
rep stosq
ret
CFI_ENDPROC
-ENDPROC(clear_page_c)
+ENDPROC(clear_page)
-ENTRY(clear_page_c_e)
+ENTRY(clear_page_orig)
CFI_STARTPROC
- movl $4096,%ecx
- xorl %eax,%eax
- rep stosb
- ret
- CFI_ENDPROC
-ENDPROC(clear_page_c_e)
-ENTRY(clear_page)
- CFI_STARTPROC
xorl %eax,%eax
movl $4096/64,%ecx
.p2align 4
@@ -45,29 +49,13 @@ ENTRY(clear_page)
nop
ret
CFI_ENDPROC
-.Lclear_page_end:
-ENDPROC(clear_page)
-
- /*
- * Some CPUs support enhanced REP MOVSB/STOSB instructions.
- * It is recommended to use this when possible.
- * If enhanced REP MOVSB/STOSB is not available, try to use fast string.
- * Otherwise, use original function.
- *
- */
+ENDPROC(clear_page_orig)
-#include <asm/cpufeature.h>
-
- .section .altinstr_replacement,"ax"
-1: .byte 0xeb /* jmp <disp8> */
- .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
-2: .byte 0xeb /* jmp <disp8> */
- .byte (clear_page_c_e - clear_page) - (3f - 2b) /* offset */
-3:
- .previous
- .section .altinstructions,"a"
- altinstruction_entry clear_page,1b,X86_FEATURE_REP_GOOD,\
- .Lclear_page_end-clear_page, 2b-1b
- altinstruction_entry clear_page,2b,X86_FEATURE_ERMS, \
- .Lclear_page_end-clear_page,3b-2b
- .previous
+ENTRY(clear_page_c_e)
+ CFI_STARTPROC
+ movl $4096,%ecx
+ xorl %eax,%eax
+ rep stosb
+ ret
+ CFI_ENDPROC
+ENDPROC(clear_page_c_e)
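clear_page here (and copy_page, memcpy, and memset below) all move to the ALTERNATIVE/ALTERNATIVE_2 macros: a jmp at the function entry is NOPped out or redirected once CPU features are known, replacing the old hand-rolled .altinstructions sections. A user-space analogy of the same one-time, feature-based dispatch (the feature probe is a stand-in, not the kernel mechanism, and all three variants defer to memset only for illustration):

#include <stddef.h>
#include <string.h>

static void clear_page_orig(void *p) { memset(p, 0, 4096); } /* unrolled */
static void clear_page_rep(void *p)  { memset(p, 0, 4096); } /* rep stosq */
static void clear_page_erms(void *p) { memset(p, 0, 4096); } /* rep stosb */

static int cpu_has(const char *feature) { (void)feature; return 1; } /* stub */

/* Resolve once, in the same priority order as ALTERNATIVE_2 above:
 * ERMS wins over REP_GOOD, which wins over the unrolled original. */
static void (*resolve_clear_page(void))(void *)
{
        if (cpu_has("erms"))
                return clear_page_erms;
        if (cpu_has("rep_good"))
                return clear_page_rep;
        return clear_page_orig;
}

User space typically gets this one-time selection via GCC/clang ifunc resolvers; the kernel instead patches the jmp bytes in place when alternatives are applied at boot.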
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 176cca6..8239dbc 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -2,23 +2,26 @@
#include <linux/linkage.h>
#include <asm/dwarf2.h>
+#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
+/*
+ * Some CPUs run faster using the string copy instructions (sane microcode).
+ * It is also a lot simpler. Use this when possible. But, don't use streaming
+ * copy unless the CPU indicates X86_FEATURE_REP_GOOD. Could vary the
+ * prefetch distance based on SMP/UP.
+ */
ALIGN
-copy_page_rep:
+ENTRY(copy_page)
CFI_STARTPROC
+ ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx
rep movsq
ret
CFI_ENDPROC
-ENDPROC(copy_page_rep)
-
-/*
- * Don't use streaming copy unless the CPU indicates X86_FEATURE_REP_GOOD.
- * Could vary the prefetch distance based on SMP/UP.
-*/
+ENDPROC(copy_page)
-ENTRY(copy_page)
+ENTRY(copy_page_regs)
CFI_STARTPROC
subq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET 2*8
@@ -90,21 +93,5 @@ ENTRY(copy_page)
addq $2*8, %rsp
CFI_ADJUST_CFA_OFFSET -2*8
ret
-.Lcopy_page_end:
CFI_ENDPROC
-ENDPROC(copy_page)
-
- /* Some CPUs run faster using the string copy instructions.
- It is also a lot simpler. Use this when possible */
-
-#include <asm/cpufeature.h>
-
- .section .altinstr_replacement,"ax"
-1: .byte 0xeb /* jmp <disp8> */
- .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
-2:
- .previous
- .section .altinstructions,"a"
- altinstruction_entry copy_page, 1b, X86_FEATURE_REP_GOOD, \
- .Lcopy_page_end-copy_page, 2b-1b
- .previous
+ENDPROC(copy_page_regs)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index dee945d..fa997df 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -8,9 +8,6 @@
#include <linux/linkage.h>
#include <asm/dwarf2.h>
-
-#define FIX_ALIGNMENT 1
-
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
@@ -19,33 +16,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
-/*
- * By placing feature2 after feature1 in altinstructions section, we logically
- * implement:
- * If CPU has feature2, jmp to alt2 is used
- * else if CPU has feature1, jmp to alt1 is used
- * else jmp to orig is used.
- */
- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
-0:
- .byte 0xe9 /* 32bit jump */
- .long \orig-1f /* by default jump to orig */
-1:
- .section .altinstr_replacement,"ax"
-2: .byte 0xe9 /* near jump with 32bit immediate */
- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
-3: .byte 0xe9 /* near jump with 32bit immediate */
- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
- .previous
-
- .section .altinstructions,"a"
- altinstruction_entry 0b,2b,\feature1,5,5
- altinstruction_entry 0b,3b,\feature2,5,5
- .previous
- .endm
-
.macro ALIGN_DESTINATION
-#ifdef FIX_ALIGNMENT
/* check for bad alignment of destination */
movl %edi,%ecx
andl $7,%ecx
@@ -67,7 +38,6 @@
_ASM_EXTABLE(100b,103b)
_ASM_EXTABLE(101b,103b)
-#endif
.endm
/* Standard copy_to_user with segment limit checking */
@@ -79,9 +49,11 @@ ENTRY(_copy_to_user)
jc bad_to_user
cmpq TI_addr_limit(%rax),%rcx
ja bad_to_user
- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
- copy_user_generic_unrolled,copy_user_generic_string, \
- copy_user_enhanced_fast_string
+ ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
+ "jmp copy_user_generic_string", \
+ X86_FEATURE_REP_GOOD, \
+ "jmp copy_user_enhanced_fast_string", \
+ X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_to_user)
@@ -94,9 +66,11 @@ ENTRY(_copy_from_user)
jc bad_from_user
cmpq TI_addr_limit(%rax),%rcx
ja bad_from_user
- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
- copy_user_generic_unrolled,copy_user_generic_string, \
- copy_user_enhanced_fast_string
+ ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
+ "jmp copy_user_generic_string", \
+ X86_FEATURE_REP_GOOD, \
+ "jmp copy_user_enhanced_fast_string", \
+ X86_FEATURE_ERMS
CFI_ENDPROC
ENDPROC(_copy_from_user)
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 2419d5f..9734182 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -196,7 +196,7 @@ ENTRY(csum_partial_copy_generic)
/* handle last odd byte */
.Lhandle_1:
- testl $1, %r10d
+ testb $1, %r10b
jz .Lende
xorl %ebx, %ebx
source
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 1313ae6..8f72b33 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -52,6 +52,13 @@
*/
void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
{
+ /*
+ * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
+ * even if the input buffer is long enough to hold them.
+ */
+ if (buf_len > MAX_INSN_SIZE)
+ buf_len = MAX_INSN_SIZE;
+
memset(insn, 0, sizeof(*insn));
insn->kaddr = kaddr;
insn->end_kaddr = kaddr + buf_len;
@@ -164,6 +171,12 @@ found:
/* VEX.W overrides opnd_size */
insn->opnd_bytes = 8;
} else {
+ /*
+ * For VEX2, fake VEX3-like byte#2.
+ * Makes it easier to decode vex.W, vex.vvvv,
+ * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
+ */
+ insn->vex_prefix.bytes[2] = b2 & 0x7f;
insn->vex_prefix.nbytes = 2;
insn->next_byte += 2;
}
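The faked byte lets VEX2 and VEX3 prefixes share one set of field masks: byte 2 of a 3-byte VEX prefix packs W, vvvv, L and pp, and masking a VEX2 payload byte with 0x7f clears bit 7, which is exactly vex.W. Extracting those fields from such a byte (a sketch; field layout per the Intel SDM, with vvvv stored one's-complemented):

#include <stdint.h>

struct vex_fields { unsigned w, vvvv, l, pp; };

/* Decode byte 2 of a 3-byte VEX prefix: bit 7 = W, bits 6..3 = ~vvvv,
 * bit 2 = L, bits 1..0 = pp. vvvv is recovered as a register number by
 * complementing the stored bits. */
static struct vex_fields decode_vex3_byte2(uint8_t b)
{
        struct vex_fields f;

        f.w    = (b >> 7) & 1;
        f.vvvv = (~b >> 3) & 0xf;
        f.l    = (b >> 2) & 1;
        f.pp   = b & 3;
        return f;
}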
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 89b53c9..b046664 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -1,12 +1,20 @@
/* Copyright 2002 Andi Kleen */
#include <linux/linkage.h>
-
#include <asm/cpufeature.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h>
/*
+ * We build a jump to memcpy_orig by default which gets NOPped out on
+ * the majority of x86 CPUs, which set REP_GOOD. In addition, on CPUs
+ * which have the enhanced REP MOVSB/STOSB feature (ERMS), those NOPs
+ * are changed to a jmp to memcpy_erms, which does the REP; MOVSB copy.
+ */
+
+.weak memcpy
+
+/*
* memcpy - Copy a memory block.
*
* Input:
@@ -17,15 +25,11 @@
* Output:
* rax original destination
*/
+ENTRY(__memcpy)
+ENTRY(memcpy)
+ ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
+ "jmp memcpy_erms", X86_FEATURE_ERMS
-/*
- * memcpy_c() - fast string ops (REP MOVSQ) based variant.
- *
- * This gets patched over the unrolled variant (below) via the
- * alternative instructions framework:
- */
- .section .altinstr_replacement, "ax", @progbits
-.Lmemcpy_c:
movq %rdi, %rax
movq %rdx, %rcx
shrq $3, %rcx
@@ -34,29 +38,21 @@
movl %edx, %ecx
rep movsb
ret
-.Lmemcpy_e:
- .previous
+ENDPROC(memcpy)
+ENDPROC(__memcpy)
/*
- * memcpy_c_e() - enhanced fast string memcpy. This is faster and simpler than
- * memcpy_c. Use memcpy_c_e when possible.
- *
- * This gets patched over the unrolled variant (below) via the
- * alternative instructions framework:
+ * memcpy_erms() - enhanced fast string memcpy. This is faster and
+ * simpler than memcpy. Use memcpy_erms when possible.
*/
- .section .altinstr_replacement, "ax", @progbits
-.Lmemcpy_c_e:
+ENTRY(memcpy_erms)
movq %rdi, %rax
movq %rdx, %rcx
rep movsb
ret
-.Lmemcpy_e_e:
- .previous
-
-.weak memcpy
+ENDPROC(memcpy_erms)
-ENTRY(__memcpy)
-ENTRY(memcpy)
+ENTRY(memcpy_orig)
CFI_STARTPROC
movq %rdi, %rax
@@ -183,26 +179,4 @@ ENTRY(memcpy)
.Lend:
retq
CFI_ENDPROC
-ENDPROC(memcpy)
-ENDPROC(__memcpy)
-
- /*
- * Some CPUs are adding enhanced REP MOVSB/STOSB feature
- * If the feature is supported, memcpy_c_e() is the first choice.
- * If enhanced rep movsb copy is not available, use fast string copy
- * memcpy_c() when possible. This is faster and code is simpler than
- * original memcpy().
- * Otherwise, original memcpy() is used.
- * In .altinstructions section, ERMS feature is placed after REG_GOOD
- * feature to implement the right patch order.
- *
- * Replace only beginning, memcpy is used to apply alternatives,
- * so it is silly to overwrite itself with nops - reboot is the
- * only outcome...
- */
- .section .altinstructions, "a"
- altinstruction_entry __memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\
- .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c
- altinstruction_entry __memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \
- .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e
- .previous
+ENDPROC(memcpy_orig)
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 9c4b530..0f8a0d0 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -5,7 +5,6 @@
* This assembly file is re-written from memmove_64.c file.
* - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
*/
-#define _STRING_C
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h>
@@ -44,6 +43,8 @@ ENTRY(__memmove)
jg 2f
.Lmemmove_begin_forward:
+ ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
+
/*
* The movsq instruction has a high startup latency,
* so we handle small sizes with general-purpose registers.
@@ -207,21 +208,5 @@ ENTRY(__memmove)
13:
retq
CFI_ENDPROC
-
- .section .altinstr_replacement,"ax"
-.Lmemmove_begin_forward_efs:
- /* Forward moving data. */
- movq %rdx, %rcx
- rep movsb
- retq
-.Lmemmove_end_forward_efs:
- .previous
-
- .section .altinstructions,"a"
- altinstruction_entry .Lmemmove_begin_forward, \
- .Lmemmove_begin_forward_efs,X86_FEATURE_ERMS, \
- .Lmemmove_end_forward-.Lmemmove_begin_forward, \
- .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
- .previous
ENDPROC(__memmove)
ENDPROC(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 6f44935..93118fb 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -5,19 +5,30 @@
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
+.weak memset
+
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
* string to get better performance than the original function. The code is
* simpler and shorter than the original function as well.
- *
+ *
* rdi destination
- * rsi value (char)
- * rdx count (bytes)
- *
+ * rsi value (char)
+ * rdx count (bytes)
+ *
* rax original destination
- */
- .section .altinstr_replacement, "ax", @progbits
-.Lmemset_c:
+ */
+ENTRY(memset)
+ENTRY(__memset)
+ /*
+ * Some CPUs support the enhanced REP MOVSB/STOSB feature. It is recommended
+ * to use it when possible. If not available, use fast string instructions.
+ *
+ * Otherwise, use original memset function.
+ */
+ ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
+ "jmp memset_erms", X86_FEATURE_ERMS
+
movq %rdi,%r9
movq %rdx,%rcx
andl $7,%edx
@@ -31,8 +42,8 @@
rep stosb
movq %r9,%rax
ret
-.Lmemset_e:
- .previous
+ENDPROC(memset)
+ENDPROC(__memset)
/*
* ISO C memset - set a memory block to a byte value. This function uses
@@ -45,21 +56,16 @@
*
* rax original destination
*/
- .section .altinstr_replacement, "ax", @progbits
-.Lmemset_c_e:
+ENTRY(memset_erms)
movq %rdi,%r9
movb %sil,%al
movq %rdx,%rcx
rep stosb
movq %r9,%rax
ret
-.Lmemset_e_e:
- .previous
-
-.weak memset
+ENDPROC(memset_erms)
-ENTRY(memset)
-ENTRY(__memset)
+ENTRY(memset_orig)
CFI_STARTPROC
movq %rdi,%r10
@@ -134,23 +140,4 @@ ENTRY(__memset)
jmp .Lafter_bad_alignment
.Lfinal:
CFI_ENDPROC
-ENDPROC(memset)
-ENDPROC(__memset)
-
- /* Some CPUs support enhanced REP MOVSB/STOSB feature.
- * It is recommended to use this when possible.
- *
- * If enhanced REP MOVSB/STOSB feature is not available, use fast string
- * instructions.
- *
- * Otherwise, use original memset function.
- *
- * In .altinstructions section, ERMS feature is placed after REG_GOOD
- * feature to implement the right patch order.
- */
- .section .altinstructions,"a"
- altinstruction_entry __memset,.Lmemset_c,X86_FEATURE_REP_GOOD,\
- .Lfinal-__memset,.Lmemset_e-.Lmemset_c
- altinstruction_entry __memset,.Lmemset_c_e,X86_FEATURE_ERMS, \
- .Lfinal-__memset,.Lmemset_e_e-.Lmemset_c_e
- .previous
+ENDPROC(memset_orig)
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index f6d13ee..3ca5218 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -14,8 +14,8 @@
.macro op_safe_regs op
ENTRY(\op\()_safe_regs)
CFI_STARTPROC
- pushq_cfi %rbx
- pushq_cfi %rbp
+ pushq_cfi_reg rbx
+ pushq_cfi_reg rbp
movq %rdi, %r10 /* Save pointer */
xorl %r11d, %r11d /* Return value */
movl (%rdi), %eax
@@ -35,8 +35,8 @@ ENTRY(\op\()_safe_regs)
movl %ebp, 20(%r10)
movl %esi, 24(%r10)
movl %edi, 28(%r10)
- popq_cfi %rbp
- popq_cfi %rbx
+ popq_cfi_reg rbp
+ popq_cfi_reg rbx
ret
3:
CFI_RESTORE_STATE
@@ -53,10 +53,10 @@ ENDPROC(\op\()_safe_regs)
.macro op_safe_regs op
ENTRY(\op\()_safe_regs)
CFI_STARTPROC
- pushl_cfi %ebx
- pushl_cfi %ebp
- pushl_cfi %esi
- pushl_cfi %edi
+ pushl_cfi_reg ebx
+ pushl_cfi_reg ebp
+ pushl_cfi_reg esi
+ pushl_cfi_reg edi
pushl_cfi $0 /* Return value */
pushl_cfi %eax
movl 4(%eax), %ecx
@@ -80,10 +80,10 @@ ENTRY(\op\()_safe_regs)
movl %esi, 24(%eax)
movl %edi, 28(%eax)
popl_cfi %eax
- popl_cfi %edi
- popl_cfi %esi
- popl_cfi %ebp
- popl_cfi %ebx
+ popl_cfi_reg edi
+ popl_cfi_reg esi
+ popl_cfi_reg ebp
+ popl_cfi_reg ebx
ret
3:
CFI_RESTORE_STATE
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index 5dff5f0..2322abe 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -34,10 +34,10 @@
*/
#define save_common_regs \
- pushl_cfi %ecx; CFI_REL_OFFSET ecx, 0
+ pushl_cfi_reg ecx
#define restore_common_regs \
- popl_cfi %ecx; CFI_RESTORE ecx
+ popl_cfi_reg ecx
/* Avoid uglifying the argument copying x86-64 needs to do. */
.macro movq src, dst
@@ -64,22 +64,22 @@
*/
#define save_common_regs \
- pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \
- pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \
- pushq_cfi %rcx; CFI_REL_OFFSET rcx, 0; \
- pushq_cfi %r8; CFI_REL_OFFSET r8, 0; \
- pushq_cfi %r9; CFI_REL_OFFSET r9, 0; \
- pushq_cfi %r10; CFI_REL_OFFSET r10, 0; \
- pushq_cfi %r11; CFI_REL_OFFSET r11, 0
+ pushq_cfi_reg rdi; \
+ pushq_cfi_reg rsi; \
+ pushq_cfi_reg rcx; \
+ pushq_cfi_reg r8; \
+ pushq_cfi_reg r9; \
+ pushq_cfi_reg r10; \
+ pushq_cfi_reg r11
#define restore_common_regs \
- popq_cfi %r11; CFI_RESTORE r11; \
- popq_cfi %r10; CFI_RESTORE r10; \
- popq_cfi %r9; CFI_RESTORE r9; \
- popq_cfi %r8; CFI_RESTORE r8; \
- popq_cfi %rcx; CFI_RESTORE rcx; \
- popq_cfi %rsi; CFI_RESTORE rsi; \
- popq_cfi %rdi; CFI_RESTORE rdi
+ popq_cfi_reg r11; \
+ popq_cfi_reg r10; \
+ popq_cfi_reg r9; \
+ popq_cfi_reg r8; \
+ popq_cfi_reg rcx; \
+ popq_cfi_reg rsi; \
+ popq_cfi_reg rdi
#endif
@@ -87,12 +87,10 @@
ENTRY(call_rwsem_down_read_failed)
CFI_STARTPROC
save_common_regs
- __ASM_SIZE(push,_cfi) %__ASM_REG(dx)
- CFI_REL_OFFSET __ASM_REG(dx), 0
+ __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
movq %rax,%rdi
call rwsem_down_read_failed
- __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
- CFI_RESTORE __ASM_REG(dx)
+ __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
restore_common_regs
ret
CFI_ENDPROC
@@ -124,12 +122,10 @@ ENDPROC(call_rwsem_wake)
ENTRY(call_rwsem_downgrade_wake)
CFI_STARTPROC
save_common_regs
- __ASM_SIZE(push,_cfi) %__ASM_REG(dx)
- CFI_REL_OFFSET __ASM_REG(dx), 0
+ __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
movq %rax,%rdi
call rwsem_downgrade_wake
- __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
- CFI_RESTORE __ASM_REG(dx)
+ __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
restore_common_regs
ret
CFI_ENDPROC
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S
index e28cdaf..5eb7150 100644
--- a/arch/x86/lib/thunk_32.S
+++ b/arch/x86/lib/thunk_32.S
@@ -13,12 +13,9 @@
.globl \name
\name:
CFI_STARTPROC
- pushl_cfi %eax
- CFI_REL_OFFSET eax, 0
- pushl_cfi %ecx
- CFI_REL_OFFSET ecx, 0
- pushl_cfi %edx
- CFI_REL_OFFSET edx, 0
+ pushl_cfi_reg eax
+ pushl_cfi_reg ecx
+ pushl_cfi_reg edx
.if \put_ret_addr_in_eax
/* Place EIP in the arg1 */
@@ -26,12 +23,9 @@
.endif
call \func
- popl_cfi %edx
- CFI_RESTORE edx
- popl_cfi %ecx
- CFI_RESTORE ecx
- popl_cfi %eax
- CFI_RESTORE eax
+ popl_cfi_reg edx
+ popl_cfi_reg ecx
+ popl_cfi_reg eax
ret
CFI_ENDPROC
_ASM_NOKPROBE(\name)
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index b30b5eb..f89ba4e9 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -17,9 +17,18 @@
CFI_STARTPROC
/* this one pushes 9 elems, the next one would be %rIP */
- SAVE_ARGS
+ pushq_cfi_reg rdi
+ pushq_cfi_reg rsi
+ pushq_cfi_reg rdx
+ pushq_cfi_reg rcx
+ pushq_cfi_reg rax
+ pushq_cfi_reg r8
+ pushq_cfi_reg r9
+ pushq_cfi_reg r10
+ pushq_cfi_reg r11
.if \put_ret_addr_in_rdi
+ /* 9*8(%rsp) is return addr on stack */
movq_cfi_restore 9*8, rdi
.endif
@@ -45,11 +54,22 @@
#endif
#endif
- /* SAVE_ARGS below is used only for the .cfi directives it contains. */
+#if defined(CONFIG_TRACE_IRQFLAGS) \
+ || defined(CONFIG_DEBUG_LOCK_ALLOC) \
+ || defined(CONFIG_PREEMPT)
CFI_STARTPROC
- SAVE_ARGS
+ CFI_ADJUST_CFA_OFFSET 9*8
restore:
- RESTORE_ARGS
+ popq_cfi_reg r11
+ popq_cfi_reg r10
+ popq_cfi_reg r9
+ popq_cfi_reg r8
+ popq_cfi_reg rax
+ popq_cfi_reg rcx
+ popq_cfi_reg rdx
+ popq_cfi_reg rsi
+ popq_cfi_reg rdi
ret
CFI_ENDPROC
_ASM_NOKPROBE(restore)
+#endif
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index c905e89..1f33b3d 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -69,21 +69,20 @@ EXPORT_SYMBOL(copy_in_user);
* it is not necessary to optimize tail handling.
*/
__visible unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
+copy_user_handle_tail(char *to, char *from, unsigned len)
{
- char c;
- unsigned zero_len;
-
for (; len; --len, to++) {
+ char c;
+
if (__get_user_nocheck(c, from++, sizeof(char)))
break;
if (__put_user_nocheck(c, to, sizeof(char)))
break;
}
-
- for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
- if (__put_user_nocheck(c, to++, sizeof(char)))
- break;
clac();
+
+ /* If the destination is a kernel buffer, we always clear the end */
+ if ((unsigned long)to >= TASK_SIZE_MAX)
+ memset(to, 0, len);
return len;
}
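The rewritten tail handler drops the zerorest argument: after a fault it simply zeroes whatever remains of a kernel-space destination. The same copy-then-clear shape in plain C (a sketch; the stub read stands in for __get_user_nocheck, which in real code can fail mid-copy):

#include <stddef.h>
#include <string.h>

/* Stub for a read that may fault; here it always succeeds. */
static int faulting_read(char *c, const char *from) { *c = *from; return 0; }

/* Byte-wise copy that may stop early on a "fault". Returns the number
 * of bytes NOT copied and clears the uncopied tail of the destination,
 * matching the new copy_user_handle_tail() behavior for kernel buffers. */
static size_t copy_handle_tail(char *to, const char *from, size_t len)
{
        for (; len; --len, ++to, ++from) {
                char c;

                if (faulting_read(&c, from))    /* __get_user_nocheck */
                        break;
                *to = c;                        /* __put_user_nocheck */
        }
        if (len)
                memset(to, 0, len);             /* clear the tail */
        return len;
}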
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 1a2be7c..816488c 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -273,6 +273,9 @@ dd: ESC
de: ESC
df: ESC
# 0xe0 - 0xef
+# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
+# in 64-bit mode. AMD CPUs accept the 0x66 prefix; it causes RIP truncation
+# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
e0: LOOPNE/LOOPNZ Jb (f64)
e1: LOOPE/LOOPZ Jb (f64)
e2: LOOP Jb (f64)
@@ -281,6 +284,10 @@ e4: IN AL,Ib
e5: IN eAX,Ib
e6: OUT Ib,AL
e7: OUT Ib,eAX
+# With 0x66 prefix in 64-bit mode, for AMD CPUs immediate offset
+# in "near" jumps and calls is 16-bit. For CALL,
+# push of return address is 16-bit wide, RSP is decremented by 2
+# but is not truncated to 16 bits, unlike RIP.
e8: CALL Jz (f64)
e9: JMP-near Jz (f64)
ea: JMP-far Ap (i64)
@@ -456,6 +463,7 @@ AVXcode: 1
7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqu Wx,Vx (F3)
# 0x0f 0x80-0x8f
+# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
80: JO Jz (f64)
81: JNO Jz (f64)
82: JB/JC/JNAE Jz (f64)
@@ -842,6 +850,7 @@ EndTable
GrpTable: Grp5
0: INC Ev
1: DEC Ev
+# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
2: CALLN Ev (f64)
3: CALLF Ep
4: JMPN Ev (f64)
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index c4cc740..a482d10 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -32,6 +32,4 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o
obj-$(CONFIG_ACPI_NUMA) += srat.o
obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
-obj-$(CONFIG_MEMTEST) += memtest.o
-
obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index ede025f..181c53b 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -59,7 +59,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
- if (kprobes_built_in() && !user_mode_vm(regs)) {
+ if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
@@ -148,7 +148,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
instr = (void *)convert_ip_to_linear(current, regs);
max_instr = instr + 15;
- if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
+ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
return 0;
while (instr < max_instr) {
@@ -1035,7 +1035,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
if (error_code & PF_USER)
return false;
- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
return false;
return true;
@@ -1140,7 +1140,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
* User-mode registers count as a user access even for any
* potential system fault or CPU buglet:
*/
- if (user_mode_vm(regs)) {
+ if (user_mode(regs)) {
local_irq_enable();
error_code |= PF_USER;
flags |= FAULT_FLAG_USER;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a110efc..1d55318 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -29,29 +29,33 @@
/*
* Tables translating between page_cache_type_t and pte encoding.
- * Minimal supported modes are defined statically, modified if more supported
- * cache modes are available.
- * Index into __cachemode2pte_tbl is the cachemode.
- * Index into __pte2cachemode_tbl are the caching attribute bits of the pte
- * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
+ *
+ * Minimal supported modes are defined statically, they are modified
+ * during bootup if more supported cache modes are available.
+ *
+ * Index into __cachemode2pte_tbl[] is the cachemode.
+ *
+ * Index into __pte2cachemode_tbl[] are the caching attribute bits of the pte
+ * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
*/
uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
- [_PAGE_CACHE_MODE_WB] = 0,
- [_PAGE_CACHE_MODE_WC] = _PAGE_PWT,
- [_PAGE_CACHE_MODE_UC_MINUS] = _PAGE_PCD,
- [_PAGE_CACHE_MODE_UC] = _PAGE_PCD | _PAGE_PWT,
- [_PAGE_CACHE_MODE_WT] = _PAGE_PCD,
- [_PAGE_CACHE_MODE_WP] = _PAGE_PCD,
+ [_PAGE_CACHE_MODE_WB ] = 0 | 0 ,
+ [_PAGE_CACHE_MODE_WC ] = _PAGE_PWT | 0 ,
+ [_PAGE_CACHE_MODE_UC_MINUS] = 0 | _PAGE_PCD,
+ [_PAGE_CACHE_MODE_UC ] = _PAGE_PWT | _PAGE_PCD,
+ [_PAGE_CACHE_MODE_WT ] = 0 | _PAGE_PCD,
+ [_PAGE_CACHE_MODE_WP ] = 0 | _PAGE_PCD,
};
EXPORT_SYMBOL(__cachemode2pte_tbl);
+
uint8_t __pte2cachemode_tbl[8] = {
- [__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
- [__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
- [__pte2cm_idx(_PAGE_PCD)] = _PAGE_CACHE_MODE_UC_MINUS,
- [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD)] = _PAGE_CACHE_MODE_UC,
- [__pte2cm_idx(_PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
- [__pte2cm_idx(_PAGE_PWT | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC,
- [__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
+ [__pte2cm_idx( 0 | 0 | 0 )] = _PAGE_CACHE_MODE_WB,
+ [__pte2cm_idx(_PAGE_PWT | 0 | 0 )] = _PAGE_CACHE_MODE_WC,
+ [__pte2cm_idx( 0 | _PAGE_PCD | 0 )] = _PAGE_CACHE_MODE_UC_MINUS,
+ [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0 )] = _PAGE_CACHE_MODE_UC,
+ [__pte2cm_idx( 0 | 0 | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
+ [__pte2cm_idx(_PAGE_PWT | 0 | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC,
+ [__pte2cm_idx(0 | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
EXPORT_SYMBOL(__pte2cachemode_tbl);
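The rewritten tables line the PWT/PCD/PAT columns up visually; the __pte2cm_idx() indexing they rely on compresses those three widely spaced pte bits into a 3-bit table index. The packing, spelled out as a sketch (bit positions per the x86 pte layout: PWT = 3, PCD = 4, PAT = 7):

/* Pack the caching-attribute pte bits into index bits 0..2, mirroring
 * __pte2cm_idx(): PWT (pte bit 3) -> index bit 0, PCD (bit 4) -> bit 1,
 * PAT (bit 7) -> bit 2. */
#define _PAGE_BIT_PWT 3
#define _PAGE_BIT_PCD 4
#define _PAGE_BIT_PAT 7

static unsigned int pte2cm_idx(unsigned long pte)
{
        return ((pte >> _PAGE_BIT_PWT) & 1) |
               (((pte >> _PAGE_BIT_PCD) & 1) << 1) |
               (((pte >> _PAGE_BIT_PAT) & 1) << 2);
}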
@@ -131,21 +135,7 @@ void __init early_alloc_pgt_buf(void)
int after_bootmem;
-int direct_gbpages
-#ifdef CONFIG_DIRECT_GBPAGES
- = 1
-#endif
-;
-
-static void __init init_gbpages(void)
-{
-#ifdef CONFIG_X86_64
- if (direct_gbpages && cpu_has_gbpages)
- printk(KERN_INFO "Using GB pages for direct mapping\n");
- else
- direct_gbpages = 0;
-#endif
-}
+early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);
struct map_range {
unsigned long start;
@@ -157,16 +147,12 @@ static int page_size_mask;
static void __init probe_page_size_mask(void)
{
- init_gbpages();
-
#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
/*
* For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
* This will simplify cpa(), which otherwise needs to support splitting
* large pages into small in interrupt context, etc.
*/
- if (direct_gbpages)
- page_size_mask |= 1 << PG_LEVEL_1G;
if (cpu_has_pse)
page_size_mask |= 1 << PG_LEVEL_2M;
#endif
@@ -179,6 +165,15 @@ static void __init probe_page_size_mask(void)
if (cpu_has_pge) {
cr4_set_bits_and_update_boot(X86_CR4_PGE);
__supported_pte_mask |= _PAGE_GLOBAL;
+ } else
+ __supported_pte_mask &= ~_PAGE_GLOBAL;
+
+ /* Enable 1 GB linear kernel mappings if available: */
+ if (direct_gbpages && cpu_has_gbpages) {
+ printk(KERN_INFO "Using GB pages for direct mapping\n");
+ page_size_mask |= 1 << PG_LEVEL_1G;
+ } else {
+ direct_gbpages = 0;
}
}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 30eb05a..3fba623 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -130,20 +130,6 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
return 0;
}
-static int __init parse_direct_gbpages_off(char *arg)
-{
- direct_gbpages = 0;
- return 0;
-}
-early_param("nogbpages", parse_direct_gbpages_off);
-
-static int __init parse_direct_gbpages_on(char *arg)
-{
- direct_gbpages = 1;
- return 0;
-}
-early_param("gbpages", parse_direct_gbpages_on);
-
/*
* NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
* physical space so we can cache the place of the first one and move
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index fdf617c..5ead4d6 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -67,8 +67,13 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
/*
* Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
+ * address space. It transparently creates kernel huge I/O mapping when
+ * the physical address is aligned by a huge page size (1GB or 2MB) and
+ * the requested size is at least the huge page size.
+ *
+ * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
+ * Therefore, the mapping code falls back to using smaller pages, toward
+ * 4KB, when a mapping range is covered by a non-WB type of MTRR.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
@@ -326,6 +331,20 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);
+int arch_ioremap_pud_supported(void)
+{
+#ifdef CONFIG_X86_64
+ return cpu_has_gbpages;
+#else
+ return 0;
+#endif
+}
+
+int arch_ioremap_pmd_supported(void)
+{
+ return cpu_has_pse;
+}
+
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
deleted file mode 100644
index 1e9da79..0000000
--- a/arch/x86/mm/memtest.c
+++ /dev/null
@@ -1,118 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/pfn.h>
-#include <linux/memblock.h>
-
-static u64 patterns[] __initdata = {
- /* The first entry has to be 0 to leave memtest with zeroed memory */
- 0,
- 0xffffffffffffffffULL,
- 0x5555555555555555ULL,
- 0xaaaaaaaaaaaaaaaaULL,
- 0x1111111111111111ULL,
- 0x2222222222222222ULL,
- 0x4444444444444444ULL,
- 0x8888888888888888ULL,
- 0x3333333333333333ULL,
- 0x6666666666666666ULL,
- 0x9999999999999999ULL,
- 0xccccccccccccccccULL,
- 0x7777777777777777ULL,
- 0xbbbbbbbbbbbbbbbbULL,
- 0xddddddddddddddddULL,
- 0xeeeeeeeeeeeeeeeeULL,
- 0x7a6c7258554e494cULL, /* yeah ;-) */
-};
-
-static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
-{
- printk(KERN_INFO " %016llx bad mem addr %010llx - %010llx reserved\n",
- (unsigned long long) pattern,
- (unsigned long long) start_bad,
- (unsigned long long) end_bad);
- memblock_reserve(start_bad, end_bad - start_bad);
-}
-
-static void __init memtest(u64 pattern, u64 start_phys, u64 size)
-{
- u64 *p, *start, *end;
- u64 start_bad, last_bad;
- u64 start_phys_aligned;
- const size_t incr = sizeof(pattern);
-
- start_phys_aligned = ALIGN(start_phys, incr);
- start = __va(start_phys_aligned);
- end = start + (size - (start_phys_aligned - start_phys)) / incr;
- start_bad = 0;
- last_bad = 0;
-
- for (p = start; p < end; p++)
- *p = pattern;
-
- for (p = start; p < end; p++, start_phys_aligned += incr) {
- if (*p == pattern)
- continue;
- if (start_phys_aligned == last_bad + incr) {
- last_bad += incr;
- continue;
- }
- if (start_bad)
- reserve_bad_mem(pattern, start_bad, last_bad + incr);
- start_bad = last_bad = start_phys_aligned;
- }
- if (start_bad)
- reserve_bad_mem(pattern, start_bad, last_bad + incr);
-}
-
-static void __init do_one_pass(u64 pattern, u64 start, u64 end)
-{
- u64 i;
- phys_addr_t this_start, this_end;
-
- for_each_free_mem_range(i, NUMA_NO_NODE, &this_start, &this_end, NULL) {
- this_start = clamp_t(phys_addr_t, this_start, start, end);
- this_end = clamp_t(phys_addr_t, this_end, start, end);
- if (this_start < this_end) {
- printk(KERN_INFO " %010llx - %010llx pattern %016llx\n",
- (unsigned long long)this_start,
- (unsigned long long)this_end,
- (unsigned long long)cpu_to_be64(pattern));
- memtest(pattern, this_start, this_end - this_start);
- }
- }
-}
-
-/* default is disabled */
-static int memtest_pattern __initdata;
-
-static int __init parse_memtest(char *arg)
-{
- if (arg)
- memtest_pattern = simple_strtoul(arg, NULL, 0);
- else
- memtest_pattern = ARRAY_SIZE(patterns);
-
- return 0;
-}
-
-early_param("memtest", parse_memtest);
-
-void __init early_memtest(unsigned long start, unsigned long end)
-{
- unsigned int i;
- unsigned int idx = 0;
-
- if (!memtest_pattern)
- return;
-
- printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
- for (i = memtest_pattern-1; i < UINT_MAX; --i) {
- idx = i % ARRAY_SIZE(patterns);
- do_one_pass(patterns[idx], start, end);
- }
-}
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index df4552b..9d518d6 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -65,24 +65,23 @@ static int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
- unsigned long rnd = 0;
+ unsigned long rnd;
/*
- * 8 bits of randomness in 32bit mmaps, 20 address space bits
- * 28 bits of randomness in 64bit mmaps, 40 address space bits
- */
- if (current->flags & PF_RANDOMIZE) {
- if (mmap_is_ia32())
- rnd = get_random_int() % (1<<8);
- else
- rnd = get_random_int() % (1<<28);
- }
+ * 8 bits of randomness in 32bit mmaps, 20 address space bits
+ * 28 bits of randomness in 64bit mmaps, 40 address space bits
+ */
+ if (mmap_is_ia32())
+ rnd = (unsigned long)get_random_int() % (1<<8);
+ else
+ rnd = (unsigned long)get_random_int() % (1<<28);
+
return rnd << PAGE_SHIFT;
}
-static unsigned long mmap_base(void)
+static unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
@@ -91,19 +90,19 @@ static unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+ return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
/*
* Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
* does, but not when emulating X86_32
*/
-static unsigned long mmap_legacy_base(void)
+static unsigned long mmap_legacy_base(unsigned long rnd)
{
if (mmap_is_ia32())
return TASK_UNMAPPED_BASE;
else
- return TASK_UNMAPPED_BASE + mmap_rnd();
+ return TASK_UNMAPPED_BASE + rnd;
}
/*
@@ -112,13 +111,18 @@ static unsigned long mmap_legacy_base(void)
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
- mm->mmap_legacy_base = mmap_legacy_base();
- mm->mmap_base = mmap_base();
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+ mm->mmap_legacy_base = mmap_legacy_base(random_factor);
if (mmap_is_legacy()) {
mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
+ mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
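
Two things change above: the PF_RANDOMIZE policy check moves out of the entropy helper into arch_pick_mmap_layout(), and a single random factor now feeds both the legacy and top-down bases (previously mmap_legacy_base() and mmap_base() each drew their own randomness). The exported arch_mmap_rnd() keeps x86's historical ranges: 8 random bits for 32-bit tasks and 28 for 64-bit, in page units, i.e. up to roughly 1 MiB and 1 TiB of base-address spread. A user-space model of just the arithmetic (rand() stands in for get_random_int(); the kernel's entropy source differs):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12

    /* Model of the logic above: 8 random bits for 32-bit tasks,
     * 28 for 64-bit, shifted into page units. */
    static unsigned long model_mmap_rnd(int is_ia32)
    {
        unsigned long rnd = (unsigned long)rand();

        rnd %= is_ia32 ? (1UL << 8) : (1UL << 28);
        return rnd << PAGE_SHIFT;   /* page-aligned offset */
    }

    int main(void)
    {
        srand(42);
        printf("ia32 offset: %#lx (max ~1 MiB)\n", model_mmap_rnd(1));
        printf("64-bit offset: %#lx (max ~1 TiB)\n", model_mmap_rnd(0));
        return 0;
    }
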
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index cd4785b..4053bb5 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -482,9 +482,16 @@ static void __init numa_clear_kernel_node_hotplug(void)
&memblock.reserved, mb->nid);
}
- /* Mark all kernel nodes. */
+ /*
+ * Mark all kernel nodes.
+ *
+ * When booting with mem=nn[kMG] or in a kdump kernel, numa_meminfo
+ * may not include all the memblock.reserved memory ranges because
+ * trim_snb_memory() reserves specific pages for Sandy Bridge graphics.
+ */
for_each_memblock(reserved, r)
- node_set(r->nid, numa_kernel_nodes);
+ if (r->nid != MAX_NUMNODES)
+ node_set(r->nid, numa_kernel_nodes);
/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
for (i = 0; i < numa_meminfo.nr_blks; i++) {
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 536ea2f..89af288 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -81,11 +81,9 @@ void arch_report_meminfo(struct seq_file *m)
seq_printf(m, "DirectMap4M: %8lu kB\n",
direct_pages_count[PG_LEVEL_2M] << 12);
#endif
-#ifdef CONFIG_X86_64
if (direct_gbpages)
seq_printf(m, "DirectMap1G: %8lu kB\n",
direct_pages_count[PG_LEVEL_1G] << 20);
-#endif
}
#else
static inline void split_page_count(int level) { }
@@ -1654,13 +1652,11 @@ int set_memory_ro(unsigned long addr, int numpages)
{
return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
-EXPORT_SYMBOL_GPL(set_memory_ro);
int set_memory_rw(unsigned long addr, int numpages)
{
return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
-EXPORT_SYMBOL_GPL(set_memory_rw);
int set_memory_np(unsigned long addr, int numpages)
{
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7ac6869..35af677 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -610,7 +610,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
}
#ifdef CONFIG_STRICT_DEVMEM
-/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM*/
+/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
return 1;
@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
while (cursor < to) {
if (!devmem_is_allowed(pfn)) {
- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
- current->comm, from, to - 1);
+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
+ current->comm, from, to - 1);
return 0;
}
cursor += PAGE_SIZE;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 7b22ada..0b97d2c 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -4,6 +4,7 @@
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
+#include <asm/mtrr.h>
#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
@@ -58,7 +59,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
tlb_remove_page(tlb, pte);
}
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
struct page *page = virt_to_page(pmd);
@@ -74,14 +75,14 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
tlb_remove_page(tlb, page);
}
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
tlb_remove_page(tlb, virt_to_page(pud));
}
-#endif /* PAGETABLE_LEVELS > 3 */
-#endif /* PAGETABLE_LEVELS > 2 */
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
static inline void pgd_list_add(pgd_t *pgd)
{
@@ -117,9 +118,9 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
/* If the pgd points to a shared pagetable level (either the
ptes in non-PAE, or shared PMD in PAE), then just copy the
references from swapper_pg_dir. */
- if (PAGETABLE_LEVELS == 2 ||
- (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
- PAGETABLE_LEVELS == 4) {
+ if (CONFIG_PGTABLE_LEVELS == 2 ||
+ (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
+ CONFIG_PGTABLE_LEVELS == 4) {
clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
KERNEL_PGD_PTRS);
@@ -275,12 +276,87 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
}
}
+/*
+ * Xen paravirt assumes that the pgd table occupies one whole page. The
+ * 64-bit kernel makes the same assumption.
+ *
+ * But a kernel with PAE paging that is not running as a Xen domain
+ * only needs to allocate 32 bytes for the pgd instead of one page.
+ */
+#ifdef CONFIG_X86_PAE
+
+#include <linux/slab.h>
+
+#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+#define PGD_ALIGN 32
+
+static struct kmem_cache *pgd_cache;
+
+static int __init pgd_cache_init(void)
+{
+ /*
+ * When a PAE kernel runs as a Xen domain, it does not use a
+ * shared kernel pmd, and that requires a whole page for the pgd.
+ */
+ if (!SHARED_KERNEL_PMD)
+ return 0;
+
+ /*
+ * When a PAE kernel is not running as a Xen domain, it uses a
+ * shared kernel pmd, which does not require a whole page for the
+ * pgd: 32 bytes are enough. So at boot we create a 32-byte slab
+ * cache for pgd table allocations.
+ */
+ pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
+ SLAB_PANIC, NULL);
+ if (!pgd_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+core_initcall(pgd_cache_init);
+
+static inline pgd_t *_pgd_alloc(void)
+{
+ /*
+ * Without SHARED_KERNEL_PMD the PAE kernel is running as a Xen
+ * domain, so allocate one whole page for the pgd.
+ */
+ if (!SHARED_KERNEL_PMD)
+ return (pgd_t *)__get_free_page(PGALLOC_GFP);
+
+ /*
+ * Otherwise the PAE kernel is not running as a Xen domain, so a
+ * 32-byte slab object is enough for the pgd and saves memory.
+ */
+ return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
+}
+
+static inline void _pgd_free(pgd_t *pgd)
+{
+ if (!SHARED_KERNEL_PMD)
+ free_page((unsigned long)pgd);
+ else
+ kmem_cache_free(pgd_cache, pgd);
+}
+#else
+static inline pgd_t *_pgd_alloc(void)
+{
+ return (pgd_t *)__get_free_page(PGALLOC_GFP);
+}
+
+static inline void _pgd_free(pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+#endif /* CONFIG_X86_PAE */
+
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
pmd_t *pmds[PREALLOCATED_PMDS];
- pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
+ pgd = _pgd_alloc();
if (pgd == NULL)
goto out;
@@ -310,7 +386,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
out_free_pmds:
free_pmds(mm, pmds);
out_free_pgd:
- free_page((unsigned long)pgd);
+ _pgd_free(pgd);
out:
return NULL;
}
@@ -320,7 +396,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd_mop_up_pmds(mm, pgd);
pgd_dtor(pgd);
paravirt_pgd_free(mm, pgd);
- free_page((unsigned long)pgd);
+ _pgd_free(pgd);
}
/*
@@ -485,3 +561,67 @@ void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
{
__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ u8 mtrr;
+
+ /*
+ * Do not use a huge page when the range is covered by non-WB type
+ * of MTRRs.
+ */
+ mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE);
+ if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+ return 0;
+
+ prot = pgprot_4k_2_large(prot);
+
+ set_pte((pte_t *)pud, pfn_pte(
+ (u64)addr >> PAGE_SHIFT,
+ __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+
+ return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ u8 mtrr;
+
+ /*
+ * Do not use a huge page when the range is covered by non-WB type
+ * of MTRRs.
+ */
+ mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE);
+ if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+ return 0;
+
+ prot = pgprot_4k_2_large(prot);
+
+ set_pte((pte_t *)pmd, pfn_pte(
+ (u64)addr >> PAGE_SHIFT,
+ __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+
+ return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+ if (pud_large(*pud)) {
+ pud_clear(pud);
+ return 1;
+ }
+
+ return 0;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+ if (pmd_large(*pmd)) {
+ pmd_clear(pmd);
+ return 1;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
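
pud_set_huge() and pmd_set_huge() above refuse to create a huge entry when MTRRs cover the range with anything other than write-back, since one huge page cannot honor mixed memory types. The 0xFF return from mtrr_type_lookup() means no MTRR applies (or MTRRs are disabled), which is also safe. The guard, restated compactly; MTRR_TYPE_WRBACK is the kernel's value, the 0xFF label is ours:

    #define MTRR_TYPE_WRBACK   6    /* kernel constant */
    #define MTRR_NOT_COVERED   0xFF /* our label: no MTRR applies */

    /* True if a huge mapping may be used for a range whose MTRR
     * lookup returned 'type' (sketch of the guard used above). */
    static int huge_ok(unsigned char type)
    {
        return type == MTRR_TYPE_WRBACK || type == MTRR_NOT_COVERED;
    }
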
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 5d04be5..4e664bd 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
- if (!user_mode_vm(regs)) {
+ if (!user_mode(regs)) {
unsigned long stack = kernel_stack_pointer(regs);
if (depth)
dump_trace(NULL, regs, (unsigned long *)stack, 0,
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 3d2612b..8fd6f44 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -490,7 +490,9 @@ void pcibios_scan_root(int busnum)
if (!bus) {
pci_free_resource_list(&resources);
kfree(sd);
+ return;
}
+ pci_bus_add_devices(bus);
}
void __init pcibios_set_cache_line_size(void)
@@ -513,31 +515,6 @@ void __init pcibios_set_cache_line_size(void)
}
}
-/*
- * Some device drivers assume dev->irq won't change after calling
- * pci_disable_device(). So delay releasing of IRQ resource to driver
- * unbinding time. Otherwise it will break PM subsystem and drivers
- * like xen-pciback etc.
- */
-static int pci_irq_notifier(struct notifier_block *nb, unsigned long action,
- void *data)
-{
- struct pci_dev *dev = to_pci_dev(data);
-
- if (action != BUS_NOTIFY_UNBOUND_DRIVER)
- return NOTIFY_DONE;
-
- if (pcibios_disable_irq)
- pcibios_disable_irq(dev);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block pci_irq_nb = {
- .notifier_call = pci_irq_notifier,
- .priority = INT_MIN,
-};
-
int __init pcibios_init(void)
{
if (!raw_pci_ops) {
@@ -550,9 +527,6 @@ int __init pcibios_init(void)
if (pci_bf_sort >= pci_force_bf)
pci_sort_breadthfirst();
-
- bus_register_notifier(&pci_bus_type, &pci_irq_nb);
-
return 0;
}
@@ -711,6 +685,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return 0;
}
+void pcibios_disable_device (struct pci_dev *dev)
+{
+ if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
+ pcibios_disable_irq(dev);
+}
+
int pci_ext_cfg_avail(void)
{
if (raw_pci_ext_ops)
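
The common.c hunks make two separable fixes: pcibios_scan_root() now returns early when pci_scan_root_bus() fails instead of falling through with a NULL bus, and it calls pci_bus_add_devices() so scanned devices actually become visible to drivers (the xtensa change at the end of this diff is the matching fix). Meanwhile the driver-unbind bus notifier is replaced by a pcibios_disable_device() hook. The scan-then-add shape, sketched under the assumption that pci_scan_root_bus() returns NULL on failure:

    bus = pci_scan_root_bus(NULL, busnum, ops, sd, &resources);
    if (!bus) {
        pci_free_resource_list(&resources); /* undo setup */
        kfree(sd);
        return;                             /* nothing to add */
    }
    pci_bus_add_devices(bus);               /* now drivers can bind */
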
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index efb8493..852aa4c 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{
- if (dev->irq_managed && dev->irq > 0) {
+ if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
+ dev->irq > 0) {
mp_unmap_irq(dev->irq);
dev->irq_managed = 0;
- dev->irq = 0;
}
}
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index e71b3db..5dc6ca5 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1256,9 +1256,22 @@ static int pirq_enable_irq(struct pci_dev *dev)
return 0;
}
+bool mp_should_keep_irq(struct device *dev)
+{
+ if (dev->power.is_prepared)
+ return true;
+#ifdef CONFIG_PM
+ if (dev->power.runtime_status == RPM_SUSPENDING)
+ return true;
+#endif
+
+ return false;
+}
+
static void pirq_disable_irq(struct pci_dev *dev)
{
- if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) {
+ if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
+ dev->irq_managed && dev->irq) {
mp_unmap_irq(dev->irq);
dev->irq = 0;
dev->irq_managed = 0;
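
mp_should_keep_irq() gives both pirq_disable_irq() above and intel_mid_pci_irq_disable() earlier in this diff the same rule: do not release a device's IRQ while the device is prepared for a system sleep transition or is mid runtime-suspend, because drivers expect dev->irq to stay stable across pci_disable_device() in those paths. A caller of the helper, sketched:

    if (mp_should_keep_irq(&dev->dev))
        return;                     /* PM transition: keep dev->irq */
    if (dev->irq_managed && dev->irq > 0) {
        mp_unmap_irq(dev->irq);     /* safe to tear down now */
        dev->irq_managed = 0;
    }
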
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index d143d21..d7f997f 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -67,7 +67,7 @@ void __init efi_bgrt_init(void)
image = efi_lookup_mapped_addr(bgrt_tab->image_address);
if (!image) {
- image = early_memremap(bgrt_tab->image_address,
+ image = early_ioremap(bgrt_tab->image_address,
sizeof(bmp_header));
ioremapped = true;
if (!image) {
@@ -89,7 +89,7 @@ void __init efi_bgrt_init(void)
}
if (ioremapped) {
- image = early_memremap(bgrt_tab->image_address,
+ image = early_ioremap(bgrt_tab->image_address,
bmp_header.size);
if (!image) {
pr_err("Ignoring BGRT: failed to map image memory\n");
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index dbc8627..02744df 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -85,12 +85,20 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
efi_memory_desc_t *virtual_map)
{
efi_status_t status;
+ unsigned long flags;
+ pgd_t *save_pgd;
- efi_call_phys_prolog();
+ save_pgd = efi_call_phys_prolog();
+
+ /* Disable interrupts around EFI calls: */
+ local_irq_save(flags);
status = efi_call_phys(efi_phys.set_virtual_address_map,
memory_map_size, descriptor_size,
descriptor_version, virtual_map);
- efi_call_phys_epilog();
+ local_irq_restore(flags);
+
+ efi_call_phys_epilog(save_pgd);
+
return status;
}
@@ -491,7 +499,8 @@ void __init efi_init(void)
if (efi_memmap_init())
return;
- print_efi_memmap();
+ if (efi_enabled(EFI_DBG))
+ print_efi_memmap();
}
void __init efi_late_init(void)
@@ -939,6 +948,8 @@ static int __init arch_parse_efi_cmdline(char *str)
{
if (parse_option_str(str, "old_map"))
set_bit(EFI_OLD_MEMMAP, &efi.flags);
+ if (parse_option_str(str, "debug"))
+ set_bit(EFI_DBG, &efi.flags);
return 0;
}
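
arch_parse_efi_cmdline() treats the efi= argument as a comma-separated option list, so booting with efi=old_map,debug sets both EFI_OLD_MEMMAP and the new EFI_DBG flag, and print_efi_memmap() now fires only when EFI_DBG is set. A user-space model of the matching (the kernel's parse_option_str() behaves like this; has_option() is our stand-in):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Is 'option' present in the comma-separated list 'str'? */
    static bool has_option(const char *str, const char *option)
    {
        size_t len = strlen(option);

        while (*str) {
            if (!strncmp(str, option, len) &&
                (str[len] == ',' || str[len] == '\0'))
                return true;
            str = strchr(str, ',');
            if (!str)
                break;
            str++;      /* step past the comma */
        }
        return false;
    }

    int main(void)
    {
        const char *cmdline = "old_map,debug";

        printf("debug: %d\n", has_option(cmdline, "debug"));     /* 1 */
        printf("old_map: %d\n", has_option(cmdline, "old_map")); /* 1 */
        printf("dbg: %d\n", has_option(cmdline, "dbg"));         /* 0 */
        return 0;
    }
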
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 40e7cda..ed5b673 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -33,11 +33,10 @@
/*
* To make EFI call EFI runtime service in physical addressing mode we need
- * prolog/epilog before/after the invocation to disable interrupt, to
- * claim EFI runtime service handler exclusively and to duplicate a memory in
- * low memory space say 0 - 3G.
+ * prolog/epilog before/after the invocation to claim the EFI runtime service
+ * handler exclusively and to duplicate a memory mapping in low memory space,
+ * say 0 - 3G.
*/
-static unsigned long efi_rt_eflags;
void efi_sync_low_kernel_mappings(void) {}
void __init efi_dump_pagetable(void) {}
@@ -57,21 +56,24 @@ void __init efi_map_region(efi_memory_desc_t *md)
void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
-void __init efi_call_phys_prolog(void)
+pgd_t * __init efi_call_phys_prolog(void)
{
struct desc_ptr gdt_descr;
+ pgd_t *save_pgd;
- local_irq_save(efi_rt_eflags);
-
+ /* The current pgd is swapper_pg_dir; we will restore it later: */
+ save_pgd = swapper_pg_dir;
load_cr3(initial_page_table);
__flush_tlb_all();
gdt_descr.address = __pa(get_cpu_gdt_table(0));
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
+
+ return save_pgd;
}
-void __init efi_call_phys_epilog(void)
+void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
struct desc_ptr gdt_descr;
@@ -79,10 +81,8 @@ void __init efi_call_phys_epilog(void)
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
- load_cr3(swapper_pg_dir);
+ load_cr3(save_pgd);
__flush_tlb_all();
-
- local_irq_restore(efi_rt_eflags);
}
void __init efi_runtime_mkexec(void)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 17e80d8..a0ac0f9 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -41,9 +41,6 @@
#include <asm/realmode.h>
#include <asm/time.h>
-static pgd_t *save_pgd __initdata;
-static unsigned long efi_flags __initdata;
-
/*
* We allocate runtime services regions bottom-up, starting from -4G, i.e.
* 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
@@ -78,17 +75,18 @@ static void __init early_code_mapping_set_exec(int executable)
}
}
-void __init efi_call_phys_prolog(void)
+pgd_t * __init efi_call_phys_prolog(void)
{
unsigned long vaddress;
+ pgd_t *save_pgd;
+
int pgd;
int n_pgds;
if (!efi_enabled(EFI_OLD_MEMMAP))
- return;
+ return NULL;
early_code_mapping_set_exec(1);
- local_irq_save(efi_flags);
n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
@@ -99,24 +97,29 @@ void __init efi_call_phys_prolog(void)
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
}
__flush_tlb_all();
+
+ return save_pgd;
}
-void __init efi_call_phys_epilog(void)
+void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
/*
* After the lock is released, the original page table is restored.
*/
- int pgd;
- int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+ int pgd_idx;
+ int nr_pgds;
- if (!efi_enabled(EFI_OLD_MEMMAP))
+ if (!save_pgd)
return;
- for (pgd = 0; pgd < n_pgds; pgd++)
- set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
+ nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
+
+ for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
+ set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
+
kfree(save_pgd);
+
__flush_tlb_all();
- local_irq_restore(efi_flags);
early_code_mapping_set_exec(0);
}
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
index c9a0838..278e4da 100644
--- a/arch/x86/platform/intel-quark/imr_selftest.c
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -11,6 +11,7 @@
*/
#include <asm-generic/sections.h>
+#include <asm/cpu_device_id.h>
#include <asm/imr.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -101,6 +102,12 @@ static void __init imr_self_test(void)
}
}
+static const struct x86_cpu_id imr_ids[] __initconst = {
+ { X86_VENDOR_INTEL, 5, 9 }, /* Intel Quark SoC X1000. */
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, imr_ids);
+
/**
* imr_self_test_init - entry point for IMR driver.
*
@@ -108,7 +115,8 @@ static void __init imr_self_test(void)
*/
static int __init imr_self_test_init(void)
{
- imr_self_test();
+ if (x86_match_cpu(imr_ids))
+ imr_self_test();
return 0;
}
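
imr_self_test_init() is now gated on an x86_cpu_id table matching only the Quark X1000 (vendor Intel, family 5, model 9); MODULE_DEVICE_TABLE() additionally exports the table so module autoloading can key off the running CPU. The same gating pattern for any CPU-specific init path, sketched (quark_only_init() is illustrative):

    static const struct x86_cpu_id quark_ids[] __initconst = {
        { X86_VENDOR_INTEL, 5, 9 }, /* Intel Quark SoC X1000 */
        {}                          /* terminator */
    };

    static int __init quark_only_init(void)
    {
        if (!x86_match_cpu(quark_ids))
            return -ENODEV;         /* wrong CPU: do nothing */
        /* ... Quark-specific setup ... */
        return 0;
    }
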
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 9a2e590..7fa8b3b 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -61,7 +61,7 @@ static void battery_status_changed(void)
if (psy) {
power_supply_changed(psy);
- put_device(psy->dev);
+ power_supply_put(psy);
}
}
@@ -71,7 +71,7 @@ static void ac_status_changed(void)
if (psy) {
power_supply_changed(psy);
- put_device(psy->dev);
+ power_supply_put(psy);
}
}
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 08e350e..5513084 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -83,7 +83,7 @@ static void battery_status_changed(void)
if (psy) {
power_supply_changed(psy);
- put_device(psy->dev);
+ power_supply_put(psy);
}
}
@@ -93,7 +93,7 @@ static void ac_status_changed(void)
if (psy) {
power_supply_changed(psy);
- put_device(psy->dev);
+ power_supply_put(psy);
}
}
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 9947985..3b6ec42 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -415,7 +415,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
struct reset_args reset_args;
reset_args.sender = sender;
- cpus_clear(*mask);
+ cpumask_clear(mask);
/* find a single cpu for each uvhub in this distribution mask */
maskbits = sizeof(struct pnmask) * BITSPERBYTE;
/* each bit is a pnode relative to the partition base pnode */
@@ -425,7 +425,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
continue;
apnode = pnode + bcp->partition_base_pnode;
cpu = pnode_to_first_cpu(apnode, smaster);
- cpu_set(cpu, *mask);
+ cpumask_set_cpu(cpu, mask);
}
/* IPI all cpus; preemption is already disabled */
@@ -1126,7 +1126,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
/* don't actually do a shootdown of the local cpu */
cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
- if (cpu_isset(cpu, *cpumask))
+ if (cpumask_test_cpu(cpu, cpumask))
stat->s_ntargself++;
bau_desc = bcp->descriptor_base;
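
These are mechanical conversions from the old value-based cpus_clear()/cpu_set()/cpu_isset() operations to the pointer-based cpumask API, which stays correct when CONFIG_CPUMASK_OFFSTACK makes masks dynamically allocated rather than plain structs. The pointer-based idiom, sketched for a sleepable context (do_something() is a placeholder):

    cpumask_var_t mask;

    if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
        return -ENOMEM;                 /* off-stack allocation */
    cpumask_set_cpu(cpu, mask);         /* was: cpu_set(cpu, *mask) */
    if (cpumask_test_cpu(cpu, mask))    /* was: cpu_isset(cpu, *mask) */
        do_something();
    cpumask_clear(mask);                /* was: cpus_clear(*mask) */
    free_cpumask_var(mask);
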
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 3e32ed5..757678f 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -134,7 +134,7 @@ static void do_fpu_end(void)
static void fix_processor_context(void)
{
int cpu = smp_processor_id();
- struct tss_struct *t = &per_cpu(init_tss, cpu);
+ struct tss_struct *t = &per_cpu(cpu_tss, cpu);
#ifdef CONFIG_X86_64
struct desc_struct *desc = get_cpu_gdt_table(cpu);
tss_desc tss;
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index b3560ec..ef8187f 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -119,7 +119,7 @@
110 i386 iopl sys_iopl
111 i386 vhangup sys_vhangup
112 i386 idle
-113 i386 vm86old sys_vm86old sys32_vm86_warning
+113 i386 vm86old sys_vm86old sys_ni_syscall
114 i386 wait4 sys_wait4 compat_sys_wait4
115 i386 swapoff sys_swapoff
116 i386 sysinfo sys_sysinfo compat_sys_sysinfo
@@ -172,7 +172,7 @@
163 i386 mremap sys_mremap
164 i386 setresuid sys_setresuid16
165 i386 getresuid sys_getresuid16
-166 i386 vm86 sys_vm86 sys32_vm86_warning
+166 i386 vm86 sys_vm86 sys_ni_syscall
167 i386 query_module
168 i386 poll sys_poll
169 i386 nfsservctl
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 8d656fb..9ef32d5 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -178,7 +178,7 @@
169 common reboot sys_reboot
170 common sethostname sys_sethostname
171 common setdomainname sys_setdomainname
-172 common iopl stub_iopl
+172 common iopl sys_iopl
173 common ioperm sys_ioperm
174 64 create_module
175 common init_module sys_init_module
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 2d7d9a1..8ffd214 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -64,8 +64,8 @@
*/
static inline void rdtsc_barrier(void)
{
- alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
- alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+ alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
+ "lfence", X86_FEATURE_LFENCE_RDTSC);
}
#endif
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index 5cdfa9d..a75d8700 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -16,7 +16,7 @@
*/
/* Not going to be implemented by UML, since we have no hardware. */
-#define stub_iopl sys_ni_syscall
+#define sys_iopl sys_ni_syscall
#define sys_ioperm sys_ni_syscall
/*
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 7b9be98..275a3a8 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -51,7 +51,7 @@ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi
hostprogs-y += vdso2c
quiet_cmd_vdso2c = VDSO2C $@
@@ -206,4 +206,4 @@ $(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
PHONY += vdso_install $(vdso_img_insttargets)
vdso_install: $(vdso_img_insttargets) FORCE
-clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* vdso64*
+clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* vdso64* vdso-image-*.c vdsox32.so*
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 9793322..40d2473 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode)
cycle_t ret;
u64 last;
u32 version;
+ u32 migrate_count;
u8 flags;
unsigned cpu, cpu1;
/*
- * Note: hypervisor must guarantee that:
- * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
- * 2. that per-CPU pvclock time info is updated if the
- * underlying CPU changes.
- * 3. that version is increased whenever underlying CPU
- * changes.
- *
+ * When looping to get a consistent (time-info, tsc) pair, we
+ * also need to deal with the possibility of being migrated
+ * between vcpus, so always re-fetch the time-info for the
+ * current vcpu.
*/
do {
cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -102,20 +99,27 @@ static notrace cycle_t vread_pvclock(int *mode)
* __getcpu() calls (Gleb).
*/
- pvti = get_pvti(cpu);
+ /* Make sure migrate_count will change if we leave the VCPU. */
+ do {
+ pvti = get_pvti(cpu);
+ migrate_count = pvti->migrate_count;
+
+ cpu1 = cpu;
+ cpu = __getcpu() & VGETCPU_CPU_MASK;
+ } while (unlikely(cpu != cpu1));
version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
/*
* Test we're still on the cpu as well as the version.
- * We could have been migrated just after the first
- * vgetcpu but before fetching the version, so we
- * wouldn't notice a version change.
+ * - We must read TSC of pvti's VCPU.
+ * - KVM doesn't follow the versioning protocol, so data could
+ * change before version if we left the VCPU.
*/
- cpu1 = __getcpu() & VGETCPU_CPU_MASK;
- } while (unlikely(cpu != cpu1 ||
- (pvti->pvti.version & 1) ||
- pvti->pvti.version != version));
+ smp_rmb();
+ } while (unlikely((pvti->pvti.version & 1) ||
+ pvti->pvti.version != version ||
+ pvti->migrate_count != migrate_count));
if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
*mode = VCLOCK_NONE;
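
The reworked loop layers two retry conditions: an inner loop re-fetches pvti until the task stayed on one vcpu while reading it, and the outer loop retries whenever the version was odd or changed, or migrate_count moved, because KVM may rewrite the time info on vcpu migration without following the version protocol. The generic shape of such a version-based retry read (names illustrative):

    do {
        v = p->version;             /* snapshot version            */
        smp_rmb();                  /* order version before data   */
        val = p->payload;           /* read the payload            */
        smp_rmb();                  /* order data before re-check  */
    } while ((v & 1) ||             /* writer mid-update           */
             v != p->version);      /* changed under us: retry     */
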
diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
index 31776d0..d7ec4e2 100644
--- a/arch/x86/vdso/vdso32/sigreturn.S
+++ b/arch/x86/vdso/vdso32/sigreturn.S
@@ -17,6 +17,7 @@
.text
.globl __kernel_sigreturn
.type __kernel_sigreturn,@function
+ nop /* this nop is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
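(no body change here; see the note below on the FDE anchor)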
ALIGN
__kernel_sigreturn:
.LSTART_sigreturn:
diff --git a/arch/x86/vdso/vdso32/syscall.S b/arch/x86/vdso/vdso32/syscall.S
index 5415b56..6b286bb 100644
--- a/arch/x86/vdso/vdso32/syscall.S
+++ b/arch/x86/vdso/vdso32/syscall.S
@@ -19,8 +19,6 @@ __kernel_vsyscall:
.Lpush_ebp:
movl %ecx, %ebp
syscall
- movl $__USER32_DS, %ecx
- movl %ecx, %ss
movl %ebp, %ecx
popl %ebp
.Lpop_ebp:
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 5240f56..81665c9 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -912,6 +912,7 @@ static void xen_load_sp0(struct tss_struct *tss,
mcs = xen_mc_entry(0);
MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
xen_mc_issue(PARAVIRT_LAZY_CPU);
+ tss->x86_tss.sp0 = thread->sp0;
}
static void xen_set_iopl_mask(unsigned mask)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index adca9e2..65083ad 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -502,7 +502,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
__visible pudval_t xen_pud_val(pud_t pud)
{
return pte_mfn_to_pfn(pud.pud);
@@ -589,7 +589,7 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
-#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
/*
* (Yet another) pagetable walker. This one is intended for pinning a
@@ -1628,7 +1628,7 @@ static void xen_release_pmd(unsigned long pfn)
xen_release_ptpage(pfn, PT_PMD);
}
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -2046,7 +2046,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
pv_mmu_ops.set_pgd = xen_set_pgd;
#endif
@@ -2056,7 +2056,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
pv_mmu_ops.release_pte = xen_release_pte;
pv_mmu_ops.release_pmd = xen_release_pmd;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
pv_mmu_ops.alloc_pud = xen_alloc_pud;
pv_mmu_ops.release_pud = xen_release_pud;
#endif
@@ -2122,14 +2122,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_pgd = xen_set_pgd_hyper,
.alloc_pud = xen_alloc_pmd_init,
.release_pud = xen_release_pmd_init,
-#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
.activate_mm = xen_activate_mm,
.dup_mmap = xen_dup_mmap,
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 9f93af5..b47124d 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#else
+#define P2M_LIMIT 0
+#endif
+
static DEFINE_SPINLOCK(p2m_update_lock);
static unsigned long *p2m_mid_missing_mfn;
@@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
void __init xen_vmalloc_p2m_tree(void)
{
static struct vm_struct vm;
+ unsigned long p2m_limit;
+ p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
vm.flags = VM_ALLOC;
- vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
+ vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
PMD_SIZE * PMDS_PER_MID_PAGE);
vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
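
P2M_LIMIT is a size in GiB taken from CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT; the expression converts it to a number of p2m entries, one unsigned long per page frame, and the (phys_addr_t) cast forces the multiply into 64-bit arithmetic so it cannot overflow on 32-bit builds. The arithmetic, modeled in user space with 4 KiB pages assumed:

    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    /* GiB -> number of p2m entries (one per page frame), mirroring
     * the cast-before-multiply above to avoid 32-bit overflow. */
    static unsigned long long p2m_entries(unsigned long long gib)
    {
        return gib * 1024 * 1024 * 1024 / PAGE_SIZE;
    }

    int main(void)
    {
        printf("%llu\n", p2m_entries(512)); /* 134217728 entries */
        return 0;
    }
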
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 08e8489..8648438 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -90,14 +90,10 @@ static void cpu_bringup(void)
set_cpu_online(cpu, true);
- this_cpu_write(cpu_state, CPU_ONLINE);
-
- wmb();
+ cpu_set_state_online(cpu); /* Implies full memory barrier. */
/* We can take interrupts now: we're officially "up". */
local_irq_enable();
-
- wmb(); /* make sure everything is out */
}
/*
@@ -445,21 +441,19 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
int rc;
- per_cpu(current_task, cpu) = idle;
-#ifdef CONFIG_X86_32
- irq_ctx_init(cpu);
-#else
- clear_tsk_thread_flag(idle, TIF_FORK);
-#endif
- per_cpu(kernel_stack, cpu) =
- (unsigned long)task_stack_page(idle) -
- KERNEL_STACK_OFFSET + THREAD_SIZE;
+ common_cpu_up(cpu, idle);
xen_setup_runstate_info(cpu);
xen_setup_timer(cpu);
xen_init_lock_cpu(cpu);
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+ /*
+ * PV VCPUs are always successfully taken down (see 'while' loop
+ * in xen_cpu_die()), so -EBUSY is an error.
+ */
+ rc = cpu_check_up_prepare(cpu);
+ if (rc)
+ return rc;
/* make sure interrupts start blocked */
per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
@@ -468,10 +462,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
if (rc)
return rc;
- if (num_online_cpus() == 1)
- /* Just in case we booted with a single CPU. */
- alternatives_enable_smp();
-
rc = xen_smp_intr_init(cpu);
if (rc)
return rc;
@@ -479,10 +469,8 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
BUG_ON(rc);
- while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
+ while (cpu_report_state(cpu) != CPU_ONLINE)
HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
- barrier();
- }
return 0;
}
@@ -511,11 +499,11 @@ static void xen_cpu_die(unsigned int cpu)
schedule_timeout(HZ/10);
}
- cpu_die_common(cpu);
-
- xen_smp_intr_free(cpu);
- xen_uninit_lock_cpu(cpu);
- xen_teardown_timer(cpu);
+ if (common_cpu_die(cpu) == 0) {
+ xen_smp_intr_free(cpu);
+ xen_uninit_lock_cpu(cpu);
+ xen_teardown_timer(cpu);
+ }
}
static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
@@ -747,6 +735,16 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc;
+
+ /*
+ * This can happen if the CPU was offlined earlier and the
+ * offlining timed out in common_cpu_die().
+ */
+ if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+ xen_smp_intr_free(cpu);
+ xen_uninit_lock_cpu(cpu);
+ }
+
/*
* xen_smp_intr_init() needs to run before native_cpu_up()
* so that IPI vectors are set up on the booting CPU before
@@ -768,12 +766,6 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
return rc;
}
-static void xen_hvm_cpu_die(unsigned int cpu)
-{
- xen_cpu_die(cpu);
- native_cpu_die(cpu);
-}
-
void __init xen_hvm_smp_init(void)
{
if (!xen_have_vector_callback)
@@ -781,7 +773,7 @@ void __init xen_hvm_smp_init(void)
smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
smp_ops.cpu_up = xen_hvm_cpu_up;
- smp_ops.cpu_die = xen_hvm_cpu_die;
+ smp_ops.cpu_die = xen_cpu_die;
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
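
The SMP rework routes Xen through the generic hotplug bookkeeping: common_cpu_up() does the per-cpu stack and task setup that was open-coded here, cpu_check_up_prepare() reports whether a previous offline completed (hence the -EBUSY comment above, and the CPU_DEAD_FROZEN recovery in xen_hvm_cpu_up()), and cpu_set_state_online()/cpu_report_state() replace the raw cpu_state writes plus barriers. The resulting bring-up sequence, sketched:

    common_cpu_up(cpu, idle);       /* generic per-cpu setup */

    rc = cpu_check_up_prepare(cpu); /* -EBUSY if still going down */
    if (rc)
        return rc;

    /* ... arch/hypervisor-specific start of the CPU ... */

    while (cpu_report_state(cpu) != CPU_ONLINE)
        HYPERVISOR_sched_op(SCHEDOP_yield, NULL); /* yield, don't spin */
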
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index c4df9db..d949769 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -1,5 +1,5 @@
#include <linux/types.h>
-#include <linux/clockchips.h>
+#include <linux/tick.h>
#include <xen/interface/xen.h>
#include <xen/grant_table.h>
@@ -81,17 +81,14 @@ void xen_arch_post_suspend(int cancelled)
static void xen_vcpu_notify_restore(void *data)
{
- unsigned long reason = (unsigned long)data;
-
/* Boot processor notified via generic timekeeping_resume() */
- if ( smp_processor_id() == 0)
+ if (smp_processor_id() == 0)
return;
- clockevents_notify(reason, NULL);
+ tick_resume_local();
}
void xen_arch_resume(void)
{
- on_each_cpu(xen_vcpu_notify_restore,
- (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
+ on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
}
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 53adefd..985fc3e 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -68,11 +68,11 @@ ENTRY(xen_sysret64)
* We're already on the usermode stack at this point, but
* still with the kernel gs, so we can easily switch back
*/
- movq %rsp, PER_CPU_VAR(old_rsp)
+ movq %rsp, PER_CPU_VAR(rsp_scratch)
movq PER_CPU_VAR(kernel_stack), %rsp
pushq $__USER_DS
- pushq PER_CPU_VAR(old_rsp)
+ pushq PER_CPU_VAR(rsp_scratch)
pushq %r11
pushq $__USER_CS
pushq %rcx
@@ -87,11 +87,11 @@ ENTRY(xen_sysret32)
* We're already on the usermode stack at this point, but
* still with the kernel gs, so we can easily switch back
*/
- movq %rsp, PER_CPU_VAR(old_rsp)
+ movq %rsp, PER_CPU_VAR(rsp_scratch)
movq PER_CPU_VAR(kernel_stack), %rsp
pushq $__USER32_DS
- pushq PER_CPU_VAR(old_rsp)
+ pushq PER_CPU_VAR(rsp_scratch)
pushq %r11
pushq $__USER32_CS
pushq %rcx
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index 5b34033..b848cc3 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -174,7 +174,7 @@ static int __init pcibios_init(void)
struct pci_controller *pci_ctrl;
struct list_head resources;
struct pci_bus *bus;
- int next_busno = 0;
+ int next_busno = 0, ret;
printk("PCI: Probing PCI hardware\n");
@@ -185,14 +185,25 @@ static int __init pcibios_init(void)
pci_controller_apertures(pci_ctrl, &resources);
bus = pci_scan_root_bus(NULL, pci_ctrl->first_busno,
pci_ctrl->ops, pci_ctrl, &resources);
+ if (!bus)
+ continue;
+
pci_ctrl->bus = bus;
pci_ctrl->last_busno = bus->busn_res.end;
if (next_busno <= pci_ctrl->last_busno)
next_busno = pci_ctrl->last_busno+1;
}
pci_bus_count = next_busno;
+ ret = platform_pcibios_fixup();
+ if (ret)
+ return ret;
- return platform_pcibios_fixup();
+ for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
+ if (pci_ctrl->bus)
+ pci_bus_add_devices(pci_ctrl->bus);
+ }
+
+ return 0;
}
subsys_initcall(pcibios_init);
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 3d733ba..6b37904 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -405,11 +405,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
regs->areg[8] = (unsigned long) &frame->uc;
regs->threadptr = tp;
- /* Set access mode to USER_DS. Nomenclature is outdated, but
- * functionality is used in uaccess.h
- */
- set_fs(USER_DS);
-
#if DEBUG_SIG
printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
current->comm, current->pid, signal, frame, regs->pc);