author     Scott Wood <scottwood@freescale.com>    2013-11-01 21:17:16 (GMT)
committer  Scott Wood <scottwood@freescale.com>    2013-11-03 22:47:10 (GMT)
commit     31110de40dca4d4aeff4f253b3def948b88fa590 (patch)
tree       0d811783836d52f15e37b4244de54f44ed4f93ad /arch/powerpc
parent     ae60d5d27c429b13cf28a09ab8b9d30682433c5a (diff)
parent     8bb495e3f02401ee6f76d1b1d77f3ac9f079e376 (diff)
download   linux-fsl-qoriq-31110de40dca4d4aeff4f253b3def948b88fa590.tar.xz
Merge tag 'v3.10' into sdk-kernel-3.10
git rebase --continue

Linux 3.10

Conflicts:
	Documentation/virtual/kvm/api.txt
	arch/ia64/kvm/Makefile
	arch/powerpc/Kconfig
	arch/powerpc/Makefile
	arch/powerpc/boot/dts/b4420qds.dts
	arch/powerpc/boot/dts/b4860qds.dts
	arch/powerpc/boot/dts/b4qds.dts
	arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
	arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
	arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
	arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
	arch/powerpc/boot/dts/fsl/b4si-post.dtsi
	arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
	arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
	arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
	arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
	arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
	arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
	arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi
	arch/powerpc/boot/dts/fsl/qoriq-sec5.0-0.dtsi
	arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
	arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
	arch/powerpc/boot/dts/p1025rdb_36b.dts
	arch/powerpc/boot/dts/t4240qds.dts
	arch/powerpc/configs/corenet64_smp_defconfig
	arch/powerpc/configs/mpc85xx_defconfig
	arch/powerpc/configs/mpc85xx_smp_defconfig
	arch/powerpc/include/asm/cputable.h
	arch/powerpc/include/asm/kvm_host.h
	arch/powerpc/include/asm/kvm_ppc.h
	arch/powerpc/include/asm/machdep.h
	arch/powerpc/include/uapi/asm/kvm.h
	arch/powerpc/kernel/cpu_setup_fsl_booke.S
	arch/powerpc/kernel/cputable.c
	arch/powerpc/kernel/idle.c
	arch/powerpc/kernel/pci-common.c
	arch/powerpc/kvm/Kconfig
	arch/powerpc/kvm/book3s.c
	arch/powerpc/kvm/booke.c
	arch/powerpc/kvm/e500.c
	arch/powerpc/kvm/e500_mmu.c
	arch/powerpc/kvm/e500_mmu_host.c
	arch/powerpc/kvm/e500mc.c
	arch/powerpc/kvm/emulate.c
	arch/powerpc/kvm/irq.h
	arch/powerpc/kvm/mpic.c
	arch/powerpc/kvm/powerpc.c
	arch/powerpc/mm/tlb_nohash.c
	arch/powerpc/platforms/85xx/Kconfig
	arch/powerpc/platforms/85xx/b4_qds.c
	arch/powerpc/platforms/85xx/t4240_qds.c
	arch/powerpc/platforms/pseries/smp.c
	arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
	arch/powerpc/sysdev/fsl_msi.c
	arch/powerpc/sysdev/fsl_pci.c
	arch/powerpc/sysdev/fsl_pci.h
	arch/powerpc/sysdev/mpic.c
	arch/x86/kvm/Makefile
	arch/x86/kvm/x86.c
	drivers/Kconfig
	drivers/clk/Kconfig
	drivers/cpufreq/Makefile
	drivers/crypto/caam/caamalg.c
	drivers/crypto/caam/intern.h
	drivers/crypto/caam/jr.c
	drivers/crypto/caam/regs.h
	drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
	drivers/iommu/Makefile
	drivers/iommu/amd_iommu.c
	drivers/iommu/exynos-iommu.c
	drivers/iommu/intel-iommu.c
	drivers/iommu/iommu.c
	drivers/iommu/msm_iommu.c
	drivers/iommu/omap-iommu.c
	drivers/iommu/tegra-gart.c
	drivers/iommu/tegra-smmu.c
	drivers/misc/Makefile
	drivers/mmc/card/block.c
	drivers/mmc/card/queue.c
	drivers/mmc/core/core.c
	drivers/mtd/nand/fsl_ifc_nand.c
	drivers/net/ethernet/3com/3c501.c
	drivers/net/ethernet/8390/3c503.c
	drivers/net/ethernet/dec/ewrk3.c
	drivers/net/ethernet/freescale/fec.c
	drivers/net/ethernet/freescale/gianfar.c
	drivers/net/ethernet/freescale/gianfar.h
	drivers/net/ethernet/i825xx/3c505.c
	drivers/net/ethernet/i825xx/3c507.c
	drivers/rtc/rtc-ds3232.c
	drivers/s390/net/qeth_core_main.c
	drivers/staging/Kconfig
	drivers/staging/Makefile
	drivers/staging/ccg/u_ether.c
	drivers/usb/gadget/fsl_udc_core.c
	drivers/usb/otg/fsl_otg.c
	drivers/vfio/vfio.c
	drivers/watchdog/Kconfig
	include/linux/iommu.h
	include/linux/kvm_host.h
	include/linux/mmc/sdhci.h
	include/linux/msi.h
	include/linux/netdev_features.h
	include/linux/pci.h
	include/linux/skbuff.h
	include/net/ip6_route.h
	include/net/sch_generic.h
	include/net/xfrm.h
	include/uapi/linux/kvm.h
	net/core/netpoll.c
	virt/kvm/irqchip.c
	virt/kvm/kvm_main.c
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 48
-rw-r--r--  arch/powerpc/Kconfig.debug | 23
-rw-r--r--  arch/powerpc/Makefile | 3
-rw-r--r--  arch/powerpc/boot/dts/a3m071.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/a4m072.dts | 27
-rw-r--r--  arch/powerpc/boot/dts/ac14xx.dts | 392
-rw-r--r--  arch/powerpc/boot/dts/bluestone.dts | 8
-rw-r--r--  arch/powerpc/boot/dts/cm5200.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/digsy_mtc.dts | 14
-rw-r--r--  arch/powerpc/boot/dts/fsl/b4860si-post.dtsi | 5
-rw-r--r--  arch/powerpc/boot/dts/fsl/b4si-post.dtsi | 2
-rw-r--r--  arch/powerpc/boot/dts/fsl/p2041si-post.dtsi | 4
-rw-r--r--  arch/powerpc/boot/dts/fsl/p3041si-post.dtsi | 4
-rw-r--r--  arch/powerpc/boot/dts/fsl/p5020si-post.dtsi | 6
-rw-r--r--  arch/powerpc/boot/dts/fsl/p5040si-post.dtsi | 4
-rw-r--r--  arch/powerpc/boot/dts/fsl/t4240si-post.dtsi | 20
-rw-r--r--  arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi | 32
-rw-r--r--  arch/powerpc/boot/dts/lite5200b.dts | 23
-rw-r--r--  arch/powerpc/boot/dts/media5200.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/motionpro.dts | 26
-rw-r--r--  arch/powerpc/boot/dts/mpc5121.dtsi | 412
-rw-r--r--  arch/powerpc/boot/dts/mpc5121ads.dts | 321
-rw-r--r--  arch/powerpc/boot/dts/mpc5125twr.dts | 233
-rw-r--r--  arch/powerpc/boot/dts/mpc5200b.dtsi | 25
-rw-r--r--  arch/powerpc/boot/dts/mpc8536ds_36b.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/mucmc52.dts | 48
-rw-r--r--  arch/powerpc/boot/dts/o2d.dtsi | 27
-rw-r--r--  arch/powerpc/boot/dts/pcm030.dts | 48
-rw-r--r--  arch/powerpc/boot/dts/pcm032.dts | 45
-rw-r--r--  arch/powerpc/boot/dts/pdm360ng.dts | 275
-rw-r--r--  arch/powerpc/boot/dts/ppa8548.dts | 166
-rw-r--r--  arch/powerpc/boot/dts/sbc8548-altflash.dts | 115
-rw-r--r--  arch/powerpc/boot/dts/sbc8548-post.dtsi | 295
-rw-r--r--  arch/powerpc/boot/dts/sbc8548-pre.dtsi | 52
-rw-r--r--  arch/powerpc/boot/dts/sbc8548.dts | 356
-rw-r--r--  arch/powerpc/boot/dts/t4240qds.dts | 12
-rw-r--r--  arch/powerpc/boot/dts/uc101.dts | 52
-rw-r--r--  arch/powerpc/boot/dts/virtex440-ml507.dts | 6
-rw-r--r--  arch/powerpc/configs/83xx/kmeter1_defconfig | 6
-rw-r--r--  arch/powerpc/configs/85xx/ppa8548_defconfig | 65
-rw-r--r--  arch/powerpc/configs/85xx/sbc8548_defconfig | 19
-rw-r--r--  arch/powerpc/configs/chroma_defconfig | 2
-rw-r--r--  arch/powerpc/configs/corenet32_smp_defconfig | 1
-rw-r--r--  arch/powerpc/configs/corenet64_smp_defconfig | 31
-rw-r--r--  arch/powerpc/configs/mpc512x_defconfig | 2
-rw-r--r--  arch/powerpc/configs/mpc85xx_defconfig | 35
-rw-r--r--  arch/powerpc/configs/mpc85xx_smp_defconfig | 31
-rw-r--r--  arch/powerpc/configs/pasemi_defconfig | 31
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 147
-rw-r--r--  arch/powerpc/configs/ppc64e_defconfig | 107
-rw-r--r--  arch/powerpc/configs/ps3_defconfig | 18
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 91
-rw-r--r--  arch/powerpc/crypto/Makefile | 9
-rw-r--r--  arch/powerpc/crypto/sha1-powerpc-asm.S | 179
-rw-r--r--  arch/powerpc/crypto/sha1.c | 157
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 13
-rw-r--r--  arch/powerpc/include/asm/context_tracking.h | 10
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 144
-rw-r--r--  arch/powerpc/include/asm/cputime.h | 6
-rw-r--r--  arch/powerpc/include/asm/dbell.h | 30
-rw-r--r--  arch/powerpc/include/asm/debug.h | 15
-rw-r--r--  arch/powerpc/include/asm/dma.h | 5
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 3
-rw-r--r--  arch/powerpc/include/asm/elf.h | 3
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 140
-rw-r--r--  arch/powerpc/include/asm/firmware.h | 11
-rw-r--r--  arch/powerpc/include/asm/hardirq.h | 3
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h | 31
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 13
-rw-r--r--  arch/powerpc/include/asm/hw_breakpoint.h | 35
-rw-r--r--  arch/powerpc/include/asm/io.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 12
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 5
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 13
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 11
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 21
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 70
-rw-r--r--  arch/powerpc/include/asm/linkage.h | 13
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 2
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 10
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h | 22
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 188
-rw-r--r--  arch/powerpc/include/asm/mpc5121.h | 17
-rw-r--r--  arch/powerpc/include/asm/opal.h | 7
-rw-r--r--  arch/powerpc/include/asm/paca.h | 10
-rw-r--r--  arch/powerpc/include/asm/page.h | 19
-rw-r--r--  arch/powerpc/include/asm/page_64.h | 3
-rw-r--r--  arch/powerpc/include/asm/parport.h | 4
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 10
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h | 45
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h | 45
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 158
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h | 46
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-64k.h | 6
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 3
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 8
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 25
-rw-r--r--  arch/powerpc/include/asm/ppc4xx_ocm.h | 45
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 125
-rw-r--r--  arch/powerpc/include/asm/processor.h | 79
-rw-r--r--  arch/powerpc/include/asm/prom.h | 69
-rw-r--r--  arch/powerpc/include/asm/ps3.h | 2
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h | 2
-rw-r--r--  arch/powerpc/include/asm/ptrace.h | 3
-rw-r--r--  arch/powerpc/include/asm/reg.h | 57
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 8
-rw-r--r--  arch/powerpc/include/asm/sections.h | 3
-rw-r--r--  arch/powerpc/include/asm/signal.h | 4
-rw-r--r--  arch/powerpc/include/asm/smp.h | 2
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 2
-rw-r--r--  arch/powerpc/include/asm/syscalls.h | 15
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 76
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 9
-rw-r--r--  arch/powerpc/include/asm/tm.h | 22
-rw-r--r--  arch/powerpc/include/asm/topology.h | 5
-rw-r--r--  arch/powerpc/include/asm/udbg.h | 1
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 12
-rw-r--r--  arch/powerpc/include/asm/uprobes.h | 1
-rw-r--r--  arch/powerpc/include/asm/xics.h | 1
-rw-r--r--  arch/powerpc/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/uapi/asm/cputable.h | 9
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h | 34
-rw-r--r--  arch/powerpc/include/uapi/asm/linkage.h | 6
-rw-r--r--  arch/powerpc/include/uapi/asm/ptrace.h | 30
-rw-r--r--  arch/powerpc/include/uapi/asm/signal.h | 6
-rw-r--r--  arch/powerpc/include/uapi/asm/socket.h | 6
-rw-r--r--  arch/powerpc/include/uapi/asm/tm.h | 18
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/powerpc/kernel/Makefile | 8
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 46
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power.S | 45
-rw-r--r--  arch/powerpc/kernel/cputable.c | 20
-rw-r--r--  arch/powerpc/kernel/crash_dump.c | 5
-rw-r--r--  arch/powerpc/kernel/dbell.c | 6
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 4
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 79
-rw-r--r--  arch/powerpc/kernel/epapr_paravirt.c | 6
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 3
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 645
-rw-r--r--  arch/powerpc/kernel/fadump.c | 5
-rw-r--r--  arch/powerpc/kernel/fpu.S | 66
-rw-r--r--  arch/powerpc/kernel/head_40x.S | 47
-rw-r--r--  arch/powerpc/kernel/head_64.S | 20
-rw-r--r--  arch/powerpc/kernel/head_booke.h | 5
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c | 84
-rw-r--r--  arch/powerpc/kernel/idle.c | 91
-rw-r--r--  arch/powerpc/kernel/iommu.c | 9
-rw-r--r--  arch/powerpc/kernel/irq.c | 25
-rw-r--r--  arch/powerpc/kernel/kgdb.c | 12
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 6
-rw-r--r--  arch/powerpc/kernel/kvm.c | 7
-rw-r--r--  arch/powerpc/kernel/lparcfg.c | 12
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c | 9
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 11
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 11
-rw-r--r--  arch/powerpc/kernel/nvram_64.c | 3
-rw-r--r--  arch/powerpc/kernel/of_platform.c | 7
-rw-r--r--  arch/powerpc/kernel/paca.c | 2
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 164
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 2
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 12
-rw-r--r--  arch/powerpc/kernel/pci_dn.c | 8
-rw-r--r--  arch/powerpc/kernel/ppc32.h | 26
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 3
-rw-r--r--  arch/powerpc/kernel/proc_powerpc.c | 21
-rw-r--r--  arch/powerpc/kernel/process.c | 273
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 724
-rw-r--r--  arch/powerpc/kernel/prom_init_check.sh | 2
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 118
-rw-r--r--  arch/powerpc/kernel/ptrace32.c | 23
-rw-r--r--  arch/powerpc/kernel/rtas.c | 113
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c | 507
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 4
-rw-r--r--  arch/powerpc/kernel/rtasd.c | 49
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 6
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 22
-rw-r--r--  arch/powerpc/kernel/signal.c | 53
-rw-r--r--  arch/powerpc/kernel/signal.h | 10
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 729
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 343
-rw-r--r--  arch/powerpc/kernel/smp.c | 4
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c | 436
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 2
-rw-r--r--  arch/powerpc/kernel/time.c | 11
-rw-r--r--  arch/powerpc/kernel/tm.S | 390
-rw-r--r--  arch/powerpc/kernel/traps.c | 277
-rw-r--r--  arch/powerpc/kernel/udbg.c | 9
-rw-r--r--  arch/powerpc/kernel/uprobes.c | 29
-rw-r--r--  arch/powerpc/kernel/vdso.c | 4
-rw-r--r--  arch/powerpc/kernel/vdso32/gettimeofday.S | 26
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S | 1
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S | 26
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S | 1
-rw-r--r--  arch/powerpc/kernel/vector.S | 51
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 5
-rw-r--r--  arch/powerpc/kvm/Kconfig | 8
-rw-r--r--  arch/powerpc/kvm/Makefile | 9
-rw-r--r--  arch/powerpc/kvm/book3s.c | 21
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 122
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 106
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S | 6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 11
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_xics.c | 406
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 237
-rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c | 18
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 24
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c | 23
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c | 274
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c | 1299
-rw-r--r--  arch/powerpc/kvm/book3s_xics.h | 130
-rw-r--r--  arch/powerpc/kvm/booke.c | 5
-rw-r--r--  arch/powerpc/kvm/e500mc.c | 7
-rw-r--r--  arch/powerpc/kvm/irq.h | 3
-rw-r--r--  arch/powerpc/kvm/mpic.c | 5
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 52
-rw-r--r--  arch/powerpc/lib/Makefile | 4
-rw-r--r--  arch/powerpc/lib/copypage_power7.S | 19
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S | 12
-rw-r--r--  arch/powerpc/mm/fault.c | 45
-rw-r--r--  arch/powerpc/mm/gup.c | 18
-rw-r--r--  arch/powerpc/mm/hash_low_64.S | 22
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 198
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 256
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c | 33
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 198
-rw-r--r--  arch/powerpc/mm/icswx.c | 2
-rw-r--r--  arch/powerpc/mm/init_64.c | 20
-rw-r--r--  arch/powerpc/mm/mem.c | 61
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c | 48
-rw-r--r--  arch/powerpc/mm/numa.c | 288
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 120
-rw-r--r--  arch/powerpc/mm/slb_low.S | 50
-rw-r--r--  arch/powerpc/mm/slice.c | 223
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 2
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 2
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 12
-rw-r--r--  arch/powerpc/perf/Makefile | 5
-rw-r--r--  arch/powerpc/perf/bhrb.S | 44
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 391
-rw-r--r--  arch/powerpc/perf/e500-pmu.c | 2
-rw-r--r--  arch/powerpc/perf/power5+-pmu.c | 2
-rw-r--r--  arch/powerpc/perf/power5-pmu.c | 1
-rw-r--r--  arch/powerpc/perf/power7-pmu.c | 93
-rw-r--r--  arch/powerpc/perf/power8-pmu.c | 592
-rw-r--r--  arch/powerpc/platforms/40x/Kconfig | 8
-rw-r--r--  arch/powerpc/platforms/44x/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/512x/Kconfig | 10
-rw-r--r--  arch/powerpc/platforms/512x/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/512x/clock.c | 43
-rw-r--r--  arch/powerpc/platforms/512x/mpc512x.h | 1
-rw-r--r--  arch/powerpc/platforms/512x/mpc512x_generic.c (renamed from arch/powerpc/platforms/512x/mpc5121_generic.c) | 12
-rw-r--r--  arch/powerpc/platforms/512x/mpc512x_shared.c | 70
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c | 6
-rw-r--r--  arch/powerpc/platforms/82xx/km82xx.c | 6
-rw-r--r--  arch/powerpc/platforms/82xx/pq2.c | 4
-rw-r--r--  arch/powerpc/platforms/83xx/km83xx.c | 161
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 18
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/85xx/ppa8548.c | 98
-rw-r--r--  arch/powerpc/platforms/85xx/sgy_cts1000.c | 176
-rw-r--r--  arch/powerpc/platforms/85xx/t4240_qds.c | 4
-rw-r--r--  arch/powerpc/platforms/86xx/Kconfig | 3
-rw-r--r--  arch/powerpc/platforms/8xx/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 12
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 26
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 3
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c | 26
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq.c | 209
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq.h | 24
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c | 115
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c | 156
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_sio.c | 5
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/pmu.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spu_callbacks.c | 13
-rw-r--r--  arch/powerpc/platforms/cell/spufs/coredump.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 10
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 68
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/syscalls.c | 2
-rw-r--r--  arch/powerpc/platforms/chrp/pegasos_eth.c | 20
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/embedded6xx/mpc10x.h | 11
-rw-r--r--  arch/powerpc/platforms/pasemi/cpufreq.c | 5
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_32.c | 14
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_64.c | 5
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/Kconfig | 6
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 1
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 45
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 351
-rw-r--r--  arch/powerpc/platforms/powernv/pci-p5ioc2.c | 15
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 125
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 26
-rw-r--r--  arch/powerpc/platforms/powernv/powernv.h | 2
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 16
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 66
-rw-r--r--  arch/powerpc/platforms/prep/Kconfig | 23
-rw-r--r--  arch/powerpc/platforms/ps3/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/ps3/htab.c | 22
-rw-r--r--  arch/powerpc/platforms/ps3/time.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 4
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 24
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_cache.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pe.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 12
-rw-r--r--  arch/powerpc/platforms/pseries/firmware.c | 69
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 8
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 12
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall_inst.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/hvcserver.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 128
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 16
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 41
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 70
-rw-r--r--  arch/powerpc/platforms/pseries/pci.c | 56
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h | 33
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c | 34
-rw-r--r--  arch/powerpc/platforms/pseries/pseries.h | 9
-rw-r--r--  arch/powerpc/platforms/pseries/pseries_energy.c | 37
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/scanlog.c | 36
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 68
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 33
-rw-r--r--  arch/powerpc/platforms/pseries/suspend.c | 22
-rw-r--r--  arch/powerpc/platforms/wsp/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/wsp/ics.c | 2
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.c | 2
-rw-r--r--  arch/powerpc/sysdev/Kconfig | 1
-rw-r--r--  arch/powerpc/sysdev/Makefile | 4
-rw-r--r--  arch/powerpc/sysdev/bestcomm/Kconfig | 36
-rw-r--r--  arch/powerpc/sysdev/bestcomm/Makefile | 14
-rw-r--r--  arch/powerpc/sysdev/bestcomm/ata.c | 157
-rw-r--r--  arch/powerpc/sysdev/bestcomm/ata.h | 30
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bcom_ata_task.c | 67
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c | 78
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bcom_fec_tx_task.c | 91
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bcom_gen_bd_rx_task.c | 63
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bcom_gen_bd_tx_task.c | 69
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.c | 531
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.h | 213
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm_priv.h | 350
-rw-r--r--  arch/powerpc/sysdev/bestcomm/fec.c | 270
-rw-r--r--  arch/powerpc/sysdev/bestcomm/fec.h | 61
-rw-r--r--  arch/powerpc/sysdev/bestcomm/gen_bd.c | 354
-rw-r--r--  arch/powerpc/sysdev/bestcomm/gen_bd.h | 53
-rw-r--r--  arch/powerpc/sysdev/bestcomm/sram.c | 178
-rw-r--r--  arch/powerpc/sysdev/bestcomm/sram.h | 54
-rw-r--r--  arch/powerpc/sysdev/ehv_pic.c | 2
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 4
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 41
-rw-r--r--  arch/powerpc/sysdev/mpc5xxx_clocks.c | 4
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 16
-rw-r--r--  arch/powerpc/sysdev/mv64x60_dev.c | 16
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_ocm.c | 415
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c | 15
-rw-r--r--  arch/powerpc/sysdev/rtc_cmos_setup.c | 5
-rw-r--r--  arch/powerpc/sysdev/udbg_memcons.c | 105
-rw-r--r--  arch/powerpc/sysdev/xics/icp-native.c | 10
-rw-r--r--  arch/powerpc/sysdev/xics/ics-opal.c | 2
-rw-r--r--  arch/powerpc/sysdev/xics/ics-rtas.c | 2
-rw-r--r--  arch/powerpc/xmon/xmon.c | 25
367 files changed, 14753 insertions, 8716 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 059d8f2..1c08871 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -82,17 +82,10 @@ config GENERIC_HWEIGHT
bool
default y
-config GENERIC_GPIO
- bool
- help
- Generic GPIO API support
-
-config ARCH_NO_VIRT_TO_BUS
- def_bool PPC64
-
config PPC
bool
default y
+ select BINFMT_ELF
select OF
select OF_EARLY_FLATTREE
select HAVE_FTRACE_MCOUNT_RECORD
@@ -101,6 +94,7 @@ config PPC
select HAVE_FUNCTION_GRAPH_TRACER
select SYSCTL_EXCEPTION_TRACE
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select VIRT_TO_BUS if !PPC64
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -115,17 +109,14 @@ config PPC
select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_OPROFILE
select HAVE_DEBUG_KMEMLEAK
- select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
select HAVE_GENERIC_HARDIRQS
select ARCH_WANT_IPC_PARSE_VERSION
select SPARSE_IRQ
- select IRQ_PER_CPU
select IRQ_DOMAIN
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -144,6 +135,9 @@ config PPC
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
+ select ARCH_USE_BUILTIN_BSWAP
+ select OLD_SIGSUSPEND
+ select OLD_SIGACTION if PPC32
config EARLY_PRINTK
bool
@@ -154,6 +148,7 @@ config COMPAT
default y if PPC64
select COMPAT_BINFMT_ELF
select ARCH_WANT_OLD_COMPAT_IPC
+ select COMPAT_OLD_SIGACTION
config SYSVIPC_COMPAT
bool
@@ -275,6 +270,10 @@ config PPC_ADV_DEBUG_DAC_RANGE
depends on PPC_ADV_DEBUG_REGS && 44x
default y
+config PPC_EMULATE_SSTEP
+ bool
+ default y if KPROBES || UPROBES || XMON || HAVE_HW_BREAKPOINT
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -308,6 +307,14 @@ config MATH_EMULATION
unit, which will allow programs that use floating-point
instructions to run.
+config PPC_TRANSACTIONAL_MEM
+ bool "Transactional Memory support for POWERPC"
+ depends on PPC_BOOK3S_64
+ depends on SMP
+ default n
+ ---help---
+ Support user-mode Transactional Memory on POWERPC.
+
config 8XX_MINIMAL_FPEMU
bool "Minimal math emulation for 8xx"
depends on 8xx && !MATH_EMULATION
@@ -415,11 +422,6 @@ config NODES_SHIFT
default "4"
depends on NEED_MULTIPLE_NODES
-config MAX_ACTIVE_REGIONS
- int
- default "256" if PPC64
- default "32"
-
config ARCH_SELECT_MEMORY_MODEL
def_bool y
depends on PPC64
@@ -634,14 +636,14 @@ menu "Bus options"
config ISA
bool "Support for ISA-bus hardware"
- depends on PPC_PREP || PPC_CHRP
+ depends on PPC_CHRP
select PPC_I8259
help
Find out whether you have ISA slots on your motherboard. ISA is the
name of a bus system, i.e. the way the CPU talks to the other stuff
inside your box. If you have an Apple machine, say N here; if you
- have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If
- you have an embedded board, consult your board documentation.
+ have an IBM RS/6000 or pSeries machine, say Y. If you have an
+ embedded board, consult your board documentation.
config ZONE_DMA
bool
@@ -744,7 +746,6 @@ config PCI
bool "PCI support" if PPC_PCI_CHOICE
default y if !40x && !CPM2 && !8xx && !PPC_83xx \
&& !PPC_85xx && !PPC_86xx && !GAMECUBE_COMMON
- default PCI_PERMEDIA if !4xx && !CPM2 && !8xx
default PCI_QSPAN if !4xx && !CPM2 && 8xx
select ARCH_SUPPORTS_MSI
select GENERIC_PCI_IOMAP
@@ -774,11 +775,6 @@ config PCI_8260
select PPC_INDIRECT_PCI
default y
-config 8260_PCI9
- bool "Enable workaround for MPC826x erratum PCI 9"
- depends on PCI_8260 && !8272
- default y
-
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/Kconfig"
@@ -968,7 +964,7 @@ config TASK_SIZE_BOOL
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
- default "0x80000000" if PPC_PREP || PPC_8xx
+ default "0x80000000" if PPC_8xx
default "0xc0000000"
config CONSISTENT_SIZE_BOOL
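
The Kconfig hunks above add PPC_TRANSACTIONAL_MEM, whose help text only says that it supports user-mode transactional memory on Book3S-64. As a rough, hedged illustration of what that enables once the kernel can save and restore HTM state across context switches, the sketch below uses GCC's PowerPC HTM builtins (build with gcc -O2 -mhtm on POWER8-class hardware). It is an assumption-laden example for orientation, not code from this merge.

/*
 * Illustrative sketch only -- not part of this merge.  Shows the usual
 * user-space pattern behind CONFIG_PPC_TRANSACTIONAL_MEM: try a hardware
 * transaction, fall back to a lock if it aborts.  Build with: gcc -O2 -mhtm
 */
#include <stdio.h>

static long counter;
static volatile int fallback_lock;	/* stand-in for a real lock */

static void increment(void)
{
	if (__builtin_tbegin(0)) {
		/* Transactional path: abort if a fallback holder is active. */
		if (fallback_lock)
			__builtin_tabort(0);
		counter++;
		__builtin_tend(0);
	} else {
		/* Transaction failed or aborted: take the fallback lock. */
		while (__sync_lock_test_and_set(&fallback_lock, 1))
			;
		counter++;
		__sync_lock_release(&fallback_lock);
	}
}

int main(void)
{
	increment();
	printf("counter = %ld\n", counter);
	return 0;
}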
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 90e85c6..99e3783 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -268,8 +268,31 @@ config PPC_EARLY_DEBUG_OPAL_HVSI
Select this to enable early debugging for the PowerNV platform
using an "hvsi" console
+config PPC_EARLY_DEBUG_MEMCONS
+ bool "In memory console"
+ help
+ Select this to enable early debugging using an in memory console.
+ This console provides input and output buffers stored within the
+ kernel BSS and should be safe to select on any system. A debugger
+ can then be used to read kernel output or send input to the console.
endchoice
+config PPC_MEMCONS_OUTPUT_SIZE
+ int "In memory console output buffer size"
+ depends on PPC_EARLY_DEBUG_MEMCONS
+ default 4096
+ help
+ Selects the size of the output buffer (in bytes) of the in memory
+ console.
+
+config PPC_MEMCONS_INPUT_SIZE
+ int "In memory console input buffer size"
+ depends on PPC_EARLY_DEBUG_MEMCONS
+ default 128
+ help
+ Selects the size of the input buffer (in bytes) of the in memory
+ console.
+
config PPC_EARLY_DEBUG_OPAL
def_bool y
depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI
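
The PPC_EARLY_DEBUG_MEMCONS help text above describes an early console whose input and output buffers live in kernel BSS; the diffstat shows the actual implementation arriving as arch/powerpc/sysdev/udbg_memcons.c. The fragment below is only a rough sketch of the idea, with assumed names and layout rather than the real driver: a fixed, zero-initialised output ring that a debugger can dump after the fact.

/*
 * Rough sketch of an in-memory early-console output ring.  Illustrative
 * only; the real driver is arch/powerpc/sysdev/udbg_memcons.c.
 */
#include <stddef.h>

#define MEMCONS_OUTPUT_SIZE 4096	/* mirrors CONFIG_PPC_MEMCONS_OUTPUT_SIZE */

static char memcons_output[MEMCONS_OUTPUT_SIZE];	/* zeroed, lives in BSS */
static size_t memcons_pos;

/* Append one character, wrapping when the ring is full; a debugger can
 * later read memcons_output to recover early boot messages. */
void memcons_putc(char c)
{
	memcons_output[memcons_pos++] = c;
	if (memcons_pos >= sizeof(memcons_output))
		memcons_pos = 0;
}

void memcons_write(const char *s)
{
	while (*s)
		memcons_putc(*s++);
}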
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 1893fb9..1138208 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -88,7 +88,6 @@ endif
CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
-
CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4)
@@ -176,6 +175,7 @@ head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o
head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o
head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
head-$(CONFIG_ALTIVEC) += arch/powerpc/kernel/vector.o
+head-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += arch/powerpc/kernel/prom_init.o
core-y += arch/powerpc/kernel/ \
arch/powerpc/mm/ \
@@ -183,6 +183,7 @@ core-y += arch/powerpc/kernel/ \
arch/powerpc/sysdev/ \
arch/powerpc/platforms/ \
arch/powerpc/math-emu/ \
+ arch/powerpc/crypto/ \
arch/powerpc/net/
core-$(CONFIG_XMON) += arch/powerpc/xmon/
core-$(CONFIG_KVM) += arch/powerpc/kvm/
diff --git a/arch/powerpc/boot/dts/a3m071.dts b/arch/powerpc/boot/dts/a3m071.dts
index 877a28c..bf81b8f 100644
--- a/arch/powerpc/boot/dts/a3m071.dts
+++ b/arch/powerpc/boot/dts/a3m071.dts
@@ -17,6 +17,8 @@
/include/ "mpc5200b.dtsi"
+&gpt0 { fsl,has-wdt; };
+
/ {
model = "anonymous,a3m071";
compatible = "anonymous,a3m071";
@@ -30,10 +32,6 @@
bus-frequency = <0>; /* From boot loader */
system-frequency = <0>; /* From boot loader */
- timer@600 {
- fsl,has-wdt;
- };
-
spi@f00 {
status = "disabled";
};
diff --git a/arch/powerpc/boot/dts/a4m072.dts b/arch/powerpc/boot/dts/a4m072.dts
index fabe7b7..1f02034 100644
--- a/arch/powerpc/boot/dts/a4m072.dts
+++ b/arch/powerpc/boot/dts/a4m072.dts
@@ -15,6 +15,11 @@
/include/ "mpc5200b.dtsi"
+&gpt0 { fsl,has-wdt; };
+&gpt3 { gpio-controller; };
+&gpt4 { gpio-controller; };
+&gpt5 { gpio-controller; };
+
/ {
model = "anonymous,a4m072";
compatible = "anonymous,a4m072";
@@ -34,28 +39,6 @@
fsl,init-fd-counters = <0x3333>;
};
- timer@600 {
- fsl,has-wdt;
- };
-
- gpt3: timer@630 { /* General Purpose Timer in GPIO mode */
- compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt4: timer@640 { /* General Purpose Timer in GPIO mode */
- compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt5: timer@650 { /* General Purpose Timer in GPIO mode */
- compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
spi@f00 {
status = "disabled";
};
diff --git a/arch/powerpc/boot/dts/ac14xx.dts b/arch/powerpc/boot/dts/ac14xx.dts
new file mode 100644
index 0000000..a27a460
--- /dev/null
+++ b/arch/powerpc/boot/dts/ac14xx.dts
@@ -0,0 +1,392 @@
+/*
+ * Device Tree Source for the MPC5121e based ac14xx board
+ *
+ * Copyright 2012 Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+
+/include/ "mpc5121.dtsi"
+
+/ {
+ model = "ac14xx";
+ compatible = "ifm,ac14xx", "fsl,mpc5121";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial7;
+ spi4 = &spi4;
+ spi5 = &spi5;
+ };
+
+ cpus {
+ PowerPC,5121@0 {
+ timebase-frequency = <40000000>; /* 40 MHz (csb/4) */
+ bus-frequency = <160000000>; /* 160 MHz csb bus */
+ clock-frequency = <400000000>; /* 400 MHz ppc core */
+ };
+ };
+
+ memory {
+ reg = <0x00000000 0x10000000>; /* 256MB at 0 */
+ };
+
+ nfc@40000000 {
+ status = "disabled";
+ };
+
+ localbus@80000020 {
+ ranges = <0x0 0x0 0xfc000000 0x04000000 /* CS0: NOR flash */
+ 0x1 0x0 0xe0000000 0x00010000 /* CS1: FRAM */
+ 0x2 0x0 0xe0100000 0x00080000 /* CS2: asi1 */
+ 0x3 0x0 0xe0300000 0x00020000 /* CS3: comm */
+ 0x5 0x0 0xe0400000 0x00010000 /* CS5: safety */
+ 0x6 0x0 0xe0200000 0x00080000>; /* CS6: asi2 */
+
+ flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0x00000000 0x04000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ bank-width = <2>;
+ device-width = <2>;
+
+ partition@0 {
+ label = "dtb-kernel-production";
+ reg = <0x00000000 0x00400000>;
+ };
+ partition@1 {
+ label = "filesystem-production";
+ reg = <0x00400000 0x03400000>;
+ };
+
+ partition@2 {
+ label = "recovery";
+ reg = <0x03800000 0x00700000>;
+ };
+
+ partition@3 {
+ label = "uboot-code";
+ reg = <0x03f00000 0x00040000>;
+ };
+ partition@4 {
+ label = "uboot-env1";
+ reg = <0x03f40000 0x00020000>;
+ };
+ partition@5 {
+ label = "uboot-env2";
+ reg = <0x03f60000 0x00020000>;
+ };
+ };
+
+ fram@1,0 {
+ compatible = "ifm,ac14xx-fram", "linux,uio-pdrv-genirq";
+ reg = <1 0x00000000 0x00010000>;
+ };
+
+ asi@2,0 {
+ /* masters mapping: CS, CS offset, size */
+ reg = <2 0x00000000 0x00080000
+ 6 0x00000000 0x00080000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ifm,ac14xx-asi-fpga";
+ gpios = <
+ &gpio_pic 26 0 /* prog */
+ &gpio_pic 27 0 /* done */
+ &gpio_pic 10 0 /* reset */
+ >;
+
+ master@1 {
+ interrupts = <20 0x2>;
+ interrupt-parent = <&gpio_pic>;
+ chipselect = <2 0x00009000 0x00009100>;
+ label = "AS-i master 1";
+ };
+
+ master@2 {
+ interrupts = <21 0x2>;
+ interrupt-parent = <&gpio_pic>;
+ chipselect = <6 0x00009000 0x00009100>;
+ label = "AS-i master 2";
+ };
+ };
+
+ netx@3,0 {
+ compatible = "ifm,netx";
+ reg = <0x3 0x00000000 0x00020000>;
+ chipselect = <3 0x00101140 0x00203100>;
+ interrupts = <17 0x8>;
+ gpios = <&gpio_pic 15 0>;
+ };
+
+ safety@5,0 {
+ compatible = "ifm,safety";
+ reg = <0x5 0x00000000 0x00010000>;
+ chipselect = <5 0x00009000 0x00009100>;
+ interrupts = <22 0x2>;
+ interrupt-parent = <&gpio_pic>;
+ gpios = <
+ &gpio_pic 12 0 /* prog */
+ &gpio_pic 11 0 /* done */
+ >;
+ };
+ };
+
+ soc@80000000 {
+
+ clock@f00 {
+ compatible = "fsl,mpc5121rev2-clock", "fsl,mpc5121-clock";
+ };
+
+ /*
+ * GPIO PIC:
+ * interrupts cell = <pin nr, sense>
+ * sense == 8: Level, low assertion
+ * sense == 2: Edge, high-to-low change
+ */
+ gpio_pic: gpio@1100 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ sdhc@1500 {
+ cd-gpios = <&gpio_pic 23 0>; /* card detect */
+ wp-gpios = <&gpio_pic 24 0>; /* write protect */
+ wp-inverted; /* WP active high */
+ };
+
+ i2c@1700 {
+ /* use Fast-mode */
+ clock-frequency = <400000>;
+
+ at24@30 {
+ compatible = "at24,24c01";
+ reg = <0x30>;
+ };
+
+ at24@31 {
+ compatible = "at24,24c01";
+ reg = <0x31>;
+ };
+
+ temp@48 {
+ compatible = "ad,ad7414";
+ reg = <0x48>;
+ };
+
+ at24@50 {
+ compatible = "at24,24c01";
+ reg = <0x50>;
+ };
+
+ at24@51 {
+ compatible = "at24,24c01";
+ reg = <0x51>;
+ };
+
+ at24@52 {
+ compatible = "at24,24c01";
+ reg = <0x52>;
+ };
+
+ at24@53 {
+ compatible = "at24,24c01";
+ reg = <0x53>;
+ };
+
+ at24@54 {
+ compatible = "at24,24c01";
+ reg = <0x54>;
+ };
+
+ at24@55 {
+ compatible = "at24,24c01";
+ reg = <0x55>;
+ };
+
+ at24@56 {
+ compatible = "at24,24c01";
+ reg = <0x56>;
+ };
+
+ at24@57 {
+ compatible = "at24,24c01";
+ reg = <0x57>;
+ };
+
+ rtc@68 {
+ compatible = "stm,m41t00";
+ reg = <0x68>;
+ };
+ };
+
+ axe_pic: axe-base@2000 {
+ compatible = "fsl,mpc5121-axe-base";
+ reg = <0x2000 0x100>;
+ interrupts = <42 0x8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ axe-app {
+ compatible = "fsl,mpc5121-axe-app";
+ interrupt-parent = <&axe_pic>;
+ interrupts = <
+ /* soft interrupts */
+ 0 0x0 1 0x0 2 0x0 3 0x0
+ 4 0x0 5 0x0 6 0x0 7 0x0
+ /* fifo interrupts */
+ 8 0x0 9 0x0 10 0x0 11 0x0
+ >;
+ };
+
+ display@2100 {
+ edid = [00 FF FF FF FF FF FF 00 14 94 00 00 00 00 00 00
+ 0A 12 01 03 80 1C 23 78 CA 88 FF 94 52 54 8E 27
+ 1E 4C 50 00 00 00 01 01 01 01 01 01 01 01 01 01
+ 01 01 01 01 01 01 FB 00 B0 14 00 DC 05 00 08 04
+ 21 00 1C 23 00 00 00 18 00 00 00 FD 00 38 3C 1F
+ 3C 01 0A 20 20 20 20 20 20 20 00 00 00 FC 00 45
+ 54 30 31 38 30 30 33 44 4D 55 0A 0A 00 00 00 10
+ 00 41 30 30 30 30 30 30 30 30 30 30 30 31 00 D5];
+ };
+
+ can@2300 {
+ status = "disabled";
+ };
+
+ can@2380 {
+ status = "disabled";
+ };
+
+ viu@2400 {
+ status = "disabled";
+ };
+
+ mdio@2800 {
+ phy0: ethernet-phy@1f {
+ compatible = "smsc,lan8700";
+ reg = <0x1f>;
+ };
+ };
+
+ enet: ethernet@2800 {
+ phy-handle = <&phy0>;
+ };
+
+ usb@3000 {
+ status = "disabled";
+ };
+
+ usb@4000 {
+ status = "disabled";
+ };
+
+ /* PSC3 serial port A, aka ttyPSC0 */
+ serial0: psc@11300 {
+ compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
+ fsl,rx-fifo-size = <512>;
+ fsl,tx-fifo-size = <512>;
+ };
+
+ /* PSC4 in SPI mode */
+ spi4: psc@11400 {
+ compatible = "fsl,mpc5121-psc-spi", "fsl,mpc5121-psc";
+ fsl,rx-fifo-size = <768>;
+ fsl,tx-fifo-size = <768>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ num-cs = <1>;
+ cs-gpios = <&gpio_pic 25 0>;
+
+ flash: m25p128@0 {
+ compatible = "st,m25p128";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "spi-flash0";
+ reg = <0x00000000 0x01000000>;
+ };
+ };
+ };
+
+ /* PSC5 in SPI mode */
+ spi5: psc@11500 {
+ compatible = "fsl,mpc5121-psc-spi", "fsl,mpc5121-psc";
+ fsl,mode = "spi-master";
+ fsl,rx-fifo-size = <128>;
+ fsl,tx-fifo-size = <128>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ lcd@0 {
+ compatible = "ilitek,ili922x";
+ reg = <0>;
+ spi-max-frequency = <100000>;
+ spi-cpol;
+ spi-cpha;
+ };
+ };
+
+ /* PSC7 serial port C, aka ttyPSC2 */
+ serial7: psc@11700 {
+ compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
+ fsl,rx-fifo-size = <512>;
+ fsl,tx-fifo-size = <512>;
+ };
+
+ matrix_keypad@0 {
+ compatible = "gpio-matrix-keypad";
+ debounce-delay-ms = <5>;
+ col-scan-delay-us = <1>;
+ gpio-activelow;
+ col-gpios-binary;
+ col-switch-delay-ms = <200>;
+
+ col-gpios = <&gpio_pic 1 0>; /* pin1 */
+
+ row-gpios = <&gpio_pic 2 0 /* pin2 */
+ &gpio_pic 3 0 /* pin3 */
+ &gpio_pic 4 0>; /* pin4 */
+
+ linux,keymap = <0x0000006e /* FN LEFT */
+ 0x01000067 /* UP */
+ 0x02000066 /* FN RIGHT */
+ 0x00010069 /* LEFT */
+ 0x0101006a /* DOWN */
+ 0x0201006c>; /* RIGHT */
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ backlight {
+ label = "backlight";
+ gpios = <&gpio_pic 0 0>;
+ default-state = "keep";
+ };
+ green {
+ label = "green";
+ gpios = <&gpio_pic 18 0>;
+ default-state = "keep";
+ };
+ red {
+ label = "red";
+ gpios = <&gpio_pic 19 0>;
+ default-state = "keep";
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts
index 9d4917a..7daaca3 100644
--- a/arch/powerpc/boot/dts/bluestone.dts
+++ b/arch/powerpc/boot/dts/bluestone.dts
@@ -107,6 +107,14 @@
interrupt-parent = <&UIC0>;
};
+ OCM: ocm@400040000 {
+ compatible = "ibm,ocm";
+ status = "ok";
+ cell-index = <1>;
+ /* configured in U-Boot */
+ reg = <4 0x00040000 0x8000>; /* 32K */
+ };
+
SDR0: sdr {
compatible = "ibm,sdr-apm821xx";
dcr-reg = <0x00e 0x002>;
diff --git a/arch/powerpc/boot/dts/cm5200.dts b/arch/powerpc/boot/dts/cm5200.dts
index ad3a4f4..fb580dd 100644
--- a/arch/powerpc/boot/dts/cm5200.dts
+++ b/arch/powerpc/boot/dts/cm5200.dts
@@ -12,15 +12,13 @@
/include/ "mpc5200b.dtsi"
+&gpt0 { fsl,has-wdt; };
+
/ {
model = "schindler,cm5200";
compatible = "schindler,cm5200";
soc5200@f0000000 {
- timer@600 { // General Purpose Timer
- fsl,has-wdt;
- };
-
can@900 {
status = "disabled";
};
diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts
index a7511f2..955bff6 100644
--- a/arch/powerpc/boot/dts/digsy_mtc.dts
+++ b/arch/powerpc/boot/dts/digsy_mtc.dts
@@ -13,6 +13,9 @@
/include/ "mpc5200b.dtsi"
+&gpt0 { gpio-controller; fsl,has-wdt; };
+&gpt1 { gpio-controller; };
+
/ {
model = "intercontrol,digsy-mtc";
compatible = "intercontrol,digsy-mtc";
@@ -22,17 +25,6 @@
};
soc5200@f0000000 {
- timer@600 { // General Purpose Timer
- #gpio-cells = <2>;
- fsl,has-wdt;
- gpio-controller;
- };
-
- timer@610 {
- #gpio-cells = <2>;
- gpio-controller;
- };
-
rtc@800 {
status = "disabled";
};
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
index 4e8c887..751a29b 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
@@ -33,6 +33,7 @@
*/
/include/ "b4si-post.dtsi"
+
&bportals {
#address-cells = <0x1>;
#size-cells = <0x1>;
@@ -386,12 +387,14 @@
#address-cells = <2>;
#size-cells = <2>;
cell-index = <1>;
+ fsl,liodn-reg = <&guts 0x510>; /* RIO1LIODNR */
};
port2 {
#address-cells = <2>;
#size-cells = <2>;
cell-index = <2>;
+ fsl,liodn-reg = <&guts 0x514>; /* RIO2LIODNR */
};
};
@@ -454,7 +457,7 @@
};
corenet-cf@18000 {
- compatible = "fsl,corenet2-cf";
+ compatible = "fsl,b4860-corenet-cf";
};
/include/ "qoriq-fman3-0-1g-4.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 55ced62..b656757 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -49,11 +49,11 @@
interrupts = <20 2 0 0>;
fsl,iommu-parent = <&pamu0>;
pcie@0 {
- reg = <0 0 0 0 0>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
device_type = "pci";
+ reg = <0 0 0 0 0>;
interrupts = <20 2 0 0>;
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 1e1b810..1d87681 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -428,6 +428,7 @@
fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */
port0;
};
+
/include/ "qoriq-usb2-dr-0.dtsi"
usb1: usb@211000 {
compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
@@ -436,16 +437,19 @@
dr_mode = "host";
phy_type = "utmi";
};
+
/include/ "qoriq-sata2-0.dtsi"
sata@220000 {
fsl,iommu-parent = <&pamu1>;
fsl,liodn-reg = <&guts 0x550>; /* SATA1LIODNR */
};
+
/include/ "qoriq-sata2-1.dtsi"
sata@221000 {
fsl,iommu-parent = <&pamu1>;
fsl,liodn-reg = <&guts 0x554>; /* SATA2LIODNR */
};
+
/include/ "qoriq-sec4.2-0.dtsi"
crypto: crypto@300000 {
fsl,iommu-parent = <&pamu1>;
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
index 1e84f0f..c9b49aa 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -455,6 +455,7 @@
fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */
port0;
};
+
/include/ "qoriq-usb2-dr-0.dtsi"
usb1: usb@211000 {
compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
@@ -463,16 +464,19 @@
dr_mode = "host";
phy_type = "utmi";
};
+
/include/ "qoriq-sata2-0.dtsi"
sata@220000 {
fsl,iommu-parent = <&pamu1>;
fsl,liodn-reg = <&guts 0x550>; /* SATA1LIODNR */
};
+
/include/ "qoriq-sata2-1.dtsi"
sata@221000 {
fsl,iommu-parent = <&pamu1>;
fsl,liodn-reg = <&guts 0x554>; /* SATA2LIODNR */
};
+
/include/ "qoriq-sec4.2-0.dtsi"
crypto: crypto@300000 {
fsl,iommu-parent = <&pamu1>;
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
index ea5db06..7a48cd9 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -444,6 +444,7 @@
phy_type = "utmi";
port0;
};
+
/include/ "qoriq-usb2-dr-0.dtsi"
usb1: usb@211000 {
compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
@@ -452,11 +453,13 @@
dr_mode = "host";
phy_type = "utmi";
};
+
/include/ "qoriq-sata2-0.dtsi"
sata@220000 {
fsl,iommu-parent = <&pamu1>;
fsl,liodn-reg = <&guts 0x550>; /* SATA1LIODNR */
};
+
/include/ "qoriq-sata2-1.dtsi"
sata@221000 {
fsl,iommu-parent = <&pamu1>;
@@ -531,4 +534,7 @@
};
};
/include/ "qoriq-raid1.0-0.dtsi"
+ raideng@320000 {
+ fsl,iommu-parent = <&pamu1>;
+ };
};
diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
index 571eeab..54e35ce 100644
--- a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
@@ -416,6 +416,7 @@
phy_type = "utmi";
port0;
};
+
/include/ "qoriq-usb2-dr-0.dtsi"
usb1: usb@211000 {
compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
@@ -424,16 +425,19 @@
dr_mode = "host";
phy_type = "utmi";
};
+
/include/ "qoriq-sata2-0.dtsi"
sata@220000 {
fsl,iommu-parent = <&pamu4>;
fsl,liodn-reg = <&guts 0x550>; /* SATA1LIODNR */
};
+
/include/ "qoriq-sata2-1.dtsi"
sata@221000 {
fsl,iommu-parent = <&pamu4>;
fsl,liodn-reg = <&guts 0x554>; /* SATA2LIODNR */
};
+
/include/ "qoriq-sec5.2-0.dtsi"
crypto@300000 {
fsl,iommu-parent = <&pamu4>;
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index f51545c..d22e728 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -6,13 +6,13 @@
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
@@ -48,11 +48,11 @@
bus-range = <0x0 0xff>;
interrupts = <20 2 0 0>;
pcie@0 {
- reg = <0 0 0 0 0>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
device_type = "pci";
+ reg = <0 0 0 0 0>;
interrupts = <20 2 0 0>;
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <
@@ -74,11 +74,11 @@
bus-range = <0 0xff>;
interrupts = <21 2 0 0>;
pcie@0 {
- reg = <0 0 0 0 0>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
device_type = "pci";
+ reg = <0 0 0 0 0>;
interrupts = <21 2 0 0>;
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <
@@ -100,11 +100,11 @@
bus-range = <0x0 0xff>;
interrupts = <22 2 0 0>;
pcie@0 {
- reg = <0 0 0 0 0>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
device_type = "pci";
+ reg = <0 0 0 0 0>;
interrupts = <22 2 0 0>;
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <
@@ -126,11 +126,11 @@
bus-range = <0x0 0xff>;
interrupts = <23 2 0 0>;
pcie@0 {
- reg = <0 0 0 0 0>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
device_type = "pci";
+ reg = <0 0 0 0 0>;
interrupts = <23 2 0 0>;
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <
@@ -1058,7 +1058,7 @@
};
corenet-cf@18000 {
- compatible = "fsl,corenet2-cf";
+ compatible = "fsl,corenet-cf";
reg = <0x18000 0x1000>;
interrupts = <16 2 1 31>;
fsl,ccf-num-csdids = <32>;
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
index 82cca29..40f2845 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
@@ -6,13 +6,13 @@
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
@@ -103,69 +103,69 @@
clocks = <&mux0>;
next-level-cache = <&L2_1>;
};
- cpu1: PowerPC,e6500@1 {
+ cpu1: PowerPC,e6500@2 {
device_type = "cpu";
reg = <2 3>;
clocks = <&mux0>;
next-level-cache = <&L2_1>;
};
- cpu2: PowerPC,e6500@2 {
+ cpu2: PowerPC,e6500@4 {
device_type = "cpu";
reg = <4 5>;
clocks = <&mux0>;
next-level-cache = <&L2_1>;
};
- cpu3: PowerPC,e6500@3 {
+ cpu3: PowerPC,e6500@6 {
device_type = "cpu";
reg = <6 7>;
clocks = <&mux0>;
next-level-cache = <&L2_1>;
};
- cpu4: PowerPC,e6500@4 {
+ cpu4: PowerPC,e6500@8 {
device_type = "cpu";
reg = <8 9>;
clocks = <&mux1>;
next-level-cache = <&L2_2>;
};
- cpu5: PowerPC,e6500@5 {
+ cpu5: PowerPC,e6500@10 {
device_type = "cpu";
reg = <10 11>;
clocks = <&mux1>;
next-level-cache = <&L2_2>;
};
- cpu6: PowerPC,e6500@6 {
+ cpu6: PowerPC,e6500@12 {
device_type = "cpu";
reg = <12 13>;
clocks = <&mux1>;
next-level-cache = <&L2_2>;
};
- cpu7: PowerPC,e6500@7 {
+ cpu7: PowerPC,e6500@14 {
device_type = "cpu";
reg = <14 15>;
clocks = <&mux1>;
next-level-cache = <&L2_2>;
};
- cpu8: PowerPC,e6500@8 {
+ cpu8: PowerPC,e6500@16 {
device_type = "cpu";
reg = <16 17>;
clocks = <&mux2>;
next-level-cache = <&L2_3>;
};
- cpu9: PowerPC,e6500@9 {
+ cpu9: PowerPC,e6500@18 {
device_type = "cpu";
reg = <18 19>;
clocks = <&mux2>;
next-level-cache = <&L2_3>;
};
- cpu10: PowerPC,e6500@10 {
+ cpu10: PowerPC,e6500@20 {
device_type = "cpu";
reg = <20 21>;
clocks = <&mux2>;
next-level-cache = <&L2_3>;
};
- cpu11: PowerPC,e6500@11 {
+ cpu11: PowerPC,e6500@22 {
device_type = "cpu";
reg = <22 23>;
clocks = <&mux2>;
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
index fb288bb..5abb46c 100644
--- a/arch/powerpc/boot/dts/lite5200b.dts
+++ b/arch/powerpc/boot/dts/lite5200b.dts
@@ -12,19 +12,34 @@
/include/ "mpc5200b.dtsi"
+&gpt0 { fsl,has-wdt; };
+&gpt2 { gpio-controller; };
+&gpt3 { gpio-controller; };
+
/ {
model = "fsl,lite5200b";
compatible = "fsl,lite5200b";
+ leds {
+ compatible = "gpio-leds";
+ tmr2 {
+ gpios = <&gpt2 0 1>;
+ };
+ tmr3 {
+ gpios = <&gpt3 0 1>;
+ linux,default-trigger = "heartbeat";
+ };
+ led1 { gpios = <&gpio_wkup 2 1>; };
+ led2 { gpios = <&gpio_simple 3 1>; };
+ led3 { gpios = <&gpio_wkup 3 1>; };
+ led4 { gpios = <&gpio_simple 2 1>; };
+ };
+
memory {
reg = <0x00000000 0x10000000>; // 256MB
};
soc5200@f0000000 {
- timer@600 { // General Purpose Timer
- fsl,has-wdt;
- };
-
psc@2000 { // PSC1
compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
cell-index = <0>;
diff --git a/arch/powerpc/boot/dts/media5200.dts b/arch/powerpc/boot/dts/media5200.dts
index 48d72f3..b5413cb 100644
--- a/arch/powerpc/boot/dts/media5200.dts
+++ b/arch/powerpc/boot/dts/media5200.dts
@@ -13,6 +13,8 @@
/include/ "mpc5200b.dtsi"
+&gpt0 { fsl,has-wdt; };
+
/ {
model = "fsl,media5200";
compatible = "fsl,media5200";
@@ -41,10 +43,6 @@
soc5200@f0000000 {
bus-frequency = <132000000>;// 132 MHz
- timer@600 { // General Purpose Timer
- fsl,has-wdt;
- };
-
psc@2000 { // PSC1
status = "disabled";
};
diff --git a/arch/powerpc/boot/dts/motionpro.dts b/arch/powerpc/boot/dts/motionpro.dts
index 0b78e89..bbabd97 100644
--- a/arch/powerpc/boot/dts/motionpro.dts
+++ b/arch/powerpc/boot/dts/motionpro.dts
@@ -12,26 +12,22 @@
/include/ "mpc5200b.dtsi"
+&gpt0 { fsl,has-wdt; };
+&gpt6 { // Motion-PRO status LED
+ compatible = "promess,motionpro-led";
+ label = "motionpro-statusled";
+ blink-delay = <100>; // 100 msec
+};
+&gpt7 { // Motion-PRO ready LED
+ compatible = "promess,motionpro-led";
+ label = "motionpro-readyled";
+};
+
/ {
model = "promess,motionpro";
compatible = "promess,motionpro";
soc5200@f0000000 {
- timer@600 { // General Purpose Timer
- fsl,has-wdt;
- };
-
- timer@660 { // Motion-PRO status LED
- compatible = "promess,motionpro-led";
- label = "motionpro-statusled";
- blink-delay = <100>; // 100 msec
- };
-
- timer@670 { // Motion-PRO ready LED
- compatible = "promess,motionpro-led";
- label = "motionpro-readyled";
- };
-
can@900 {
status = "disabled";
};
diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi
new file mode 100644
index 0000000..bd14c00
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc5121.dtsi
@@ -0,0 +1,412 @@
+/*
+ * base MPC5121 Device Tree Source
+ *
+ * Copyright 2007-2008 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+
+/ {
+ model = "mpc5121";
+ compatible = "fsl,mpc5121";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&ipic>;
+
+ aliases {
+ ethernet0 = &eth0;
+ pci = &pci;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,5121@0 {
+ device_type = "cpu";
+ reg = <0>;
+ d-cache-line-size = <0x20>; /* 32 bytes */
+ i-cache-line-size = <0x20>; /* 32 bytes */
+ d-cache-size = <0x8000>; /* L1, 32K */
+ i-cache-size = <0x8000>; /* L1, 32K */
+ timebase-frequency = <49500000>;/* 49.5 MHz (csb/4) */
+ bus-frequency = <198000000>; /* 198 MHz csb bus */
+ clock-frequency = <396000000>; /* 396 MHz ppc core */
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>; /* 256MB at 0 */
+ };
+
+ mbx@20000000 {
+ compatible = "fsl,mpc5121-mbx";
+ reg = <0x20000000 0x4000>;
+ interrupts = <66 0x8>;
+ };
+
+ sram@30000000 {
+ compatible = "fsl,mpc5121-sram";
+ reg = <0x30000000 0x20000>; /* 128K at 0x30000000 */
+ };
+
+ nfc@40000000 {
+ compatible = "fsl,mpc5121-nfc";
+ reg = <0x40000000 0x100000>; /* 1M at 0x40000000 */
+ interrupts = <6 8>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ localbus@80000020 {
+ compatible = "fsl,mpc5121-localbus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ reg = <0x80000020 0x40>;
+ interrupts = <7 0x8>;
+ ranges = <0x0 0x0 0xfc000000 0x04000000>;
+ };
+
+ soc@80000000 {
+ compatible = "fsl,mpc5121-immr";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #interrupt-cells = <2>;
+ ranges = <0x0 0x80000000 0x400000>;
+ reg = <0x80000000 0x400000>;
+ bus-frequency = <66000000>; /* 66 MHz ips bus */
+
+
+ /*
+ * IPIC
+ * interrupts cell = <intr #, sense>
+ * sense values match linux IORESOURCE_IRQ_* defines:
+ * sense == 8: Level, low assertion
+ * sense == 2: Edge, high-to-low change
+ */
+ ipic: interrupt-controller@c00 {
+ compatible = "fsl,mpc5121-ipic", "fsl,ipic";
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0xc00 0x100>;
+ };
+
+ /* Watchdog timer */
+ wdt@900 {
+ compatible = "fsl,mpc5121-wdt";
+ reg = <0x900 0x100>;
+ };
+
+ /* Real time clock */
+ rtc@a00 {
+ compatible = "fsl,mpc5121-rtc";
+ reg = <0xa00 0x100>;
+ interrupts = <79 0x8 80 0x8>;
+ };
+
+ /* Reset module */
+ reset@e00 {
+ compatible = "fsl,mpc5121-reset";
+ reg = <0xe00 0x100>;
+ };
+
+ /* Clock control */
+ clock@f00 {
+ compatible = "fsl,mpc5121-clock";
+ reg = <0xf00 0x100>;
+ };
+
+ /* Power Management Controller */
+ pmc@1000{
+ compatible = "fsl,mpc5121-pmc";
+ reg = <0x1000 0x100>;
+ interrupts = <83 0x8>;
+ };
+
+ gpio@1100 {
+ compatible = "fsl,mpc5121-gpio";
+ reg = <0x1100 0x100>;
+ interrupts = <78 0x8>;
+ };
+
+ can@1300 {
+ compatible = "fsl,mpc5121-mscan";
+ reg = <0x1300 0x80>;
+ interrupts = <12 0x8>;
+ };
+
+ can@1380 {
+ compatible = "fsl,mpc5121-mscan";
+ reg = <0x1380 0x80>;
+ interrupts = <13 0x8>;
+ };
+
+ sdhc@1500 {
+ compatible = "fsl,mpc5121-sdhc";
+ reg = <0x1500 0x100>;
+ interrupts = <8 0x8>;
+ dmas = <&dma0 30>;
+ dma-names = "rx-tx";
+ };
+
+ i2c@1700 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc5121-i2c", "fsl-i2c";
+ reg = <0x1700 0x20>;
+ interrupts = <9 0x8>;
+ };
+
+ i2c@1720 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc5121-i2c", "fsl-i2c";
+ reg = <0x1720 0x20>;
+ interrupts = <10 0x8>;
+ };
+
+ i2c@1740 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc5121-i2c", "fsl-i2c";
+ reg = <0x1740 0x20>;
+ interrupts = <11 0x8>;
+ };
+
+ i2ccontrol@1760 {
+ compatible = "fsl,mpc5121-i2c-ctrl";
+ reg = <0x1760 0x8>;
+ };
+
+ axe@2000 {
+ compatible = "fsl,mpc5121-axe";
+ reg = <0x2000 0x100>;
+ interrupts = <42 0x8>;
+ };
+
+ display@2100 {
+ compatible = "fsl,mpc5121-diu";
+ reg = <0x2100 0x100>;
+ interrupts = <64 0x8>;
+ };
+
+ can@2300 {
+ compatible = "fsl,mpc5121-mscan";
+ reg = <0x2300 0x80>;
+ interrupts = <90 0x8>;
+ };
+
+ can@2380 {
+ compatible = "fsl,mpc5121-mscan";
+ reg = <0x2380 0x80>;
+ interrupts = <91 0x8>;
+ };
+
+ viu@2400 {
+ compatible = "fsl,mpc5121-viu";
+ reg = <0x2400 0x400>;
+ interrupts = <67 0x8>;
+ };
+
+ mdio@2800 {
+ compatible = "fsl,mpc5121-fec-mdio";
+ reg = <0x2800 0x800>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ eth0: ethernet@2800 {
+ device_type = "network";
+ compatible = "fsl,mpc5121-fec";
+ reg = <0x2800 0x800>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <4 0x8>;
+ };
+
+ /* USB1 using external ULPI PHY */
+ usb@3000 {
+ compatible = "fsl,mpc5121-usb2-dr";
+ reg = <0x3000 0x600>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <43 0x8>;
+ dr_mode = "otg";
+ phy_type = "ulpi";
+ };
+
+ /* USB0 using internal UTMI PHY */
+ usb@4000 {
+ compatible = "fsl,mpc5121-usb2-dr";
+ reg = <0x4000 0x600>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <44 0x8>;
+ dr_mode = "otg";
+ phy_type = "utmi_wide";
+ };
+
+ /* IO control */
+ ioctl@a000 {
+ compatible = "fsl,mpc5121-ioctl";
+ reg = <0xA000 0x1000>;
+ };
+
+ /* LocalPlus controller */
+ lpc@10000 {
+ compatible = "fsl,mpc5121-lpc";
+ reg = <0x10000 0x200>;
+ };
+
+ pata@10200 {
+ compatible = "fsl,mpc5121-pata";
+ reg = <0x10200 0x100>;
+ interrupts = <5 0x8>;
+ };
+
+ /* 512x PSCs are not 52xx PSC compatible */
+
+ /* PSC0 */
+ psc@11000 {
+ compatible = "fsl,mpc5121-psc";
+ reg = <0x11000 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ /* PSC1 */
+ psc@11100 {
+ compatible = "fsl,mpc5121-psc";
+ reg = <0x11100 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ /* PSC2 */
+ psc@11200 {
+ compatible = "fsl,mpc5121-psc";
+ reg = <0x11200 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ /* PSC3 */
+ psc@11300 {
+ compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
+ reg = <0x11300 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ /* PSC4 */
+ psc@11400 {
+ compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
+ reg = <0x11400 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ /* PSC5 */
+ psc@11500 {
+ compatible = "fsl,mpc5121-psc";
+ reg = <0x11500 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ /* PSC6 */
+ psc@11600 {
+ compatible = "fsl,mpc5121-psc";
+ reg = <0x11600 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ /* PSC7 */
+ psc@11700 {
+ compatible = "fsl,mpc5121-psc";
+ reg = <0x11700 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ /* PSC8 */
+ psc@11800 {
+ compatible = "fsl,mpc5121-psc";
+ reg = <0x11800 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ /* PSC9 */
+ psc@11900 {
+ compatible = "fsl,mpc5121-psc";
+ reg = <0x11900 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ /* PSC10 */
+ psc@11a00 {
+ compatible = "fsl,mpc5121-psc";
+ reg = <0x11a00 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ /* PSC11 */
+ psc@11b00 {
+ compatible = "fsl,mpc5121-psc";
+ reg = <0x11b00 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ pscfifo@11f00 {
+ compatible = "fsl,mpc5121-psc-fifo";
+ reg = <0x11f00 0x100>;
+ interrupts = <40 0x8>;
+ };
+
+ dma0: dma@14000 {
+ compatible = "fsl,mpc5121-dma";
+ reg = <0x14000 0x1800>;
+ interrupts = <65 0x8>;
+ };
+ };
+
+ pci: pci@80008500 {
+ compatible = "fsl,mpc5121-pci";
+ device_type = "pci";
+ interrupts = <1 0x8>;
+ clock-frequency = <0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ reg = <0x80008500 0x100 /* internal registers */
+ 0x80008300 0x8>; /* config space access registers */
+ bus-range = <0x0 0x0>;
+ ranges = <0x42000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
+ 0x02000000 0x0 0xb0000000 0xb0000000 0x0 0x10000000
+ 0x01000000 0x0 0x00000000 0x84000000 0x0 0x01000000>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc5121ads.dts b/arch/powerpc/boot/dts/mpc5121ads.dts
index c9ef6bb..7d3cb79 100644
--- a/arch/powerpc/boot/dts/mpc5121ads.dts
+++ b/arch/powerpc/boot/dts/mpc5121ads.dts
@@ -1,7 +1,7 @@
/*
* MPC5121E ADS Device Tree Source
*
- * Copyright 2007,2008 Freescale Semiconductor Inc.
+ * Copyright 2007-2008 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -9,74 +9,26 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "mpc5121.dtsi"
/ {
model = "mpc5121ads";
- compatible = "fsl,mpc5121ads";
- #address-cells = <1>;
- #size-cells = <1>;
-
- aliases {
- pci = &pci;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,5121@0 {
- device_type = "cpu";
- reg = <0>;
- d-cache-line-size = <0x20>; // 32 bytes
- i-cache-line-size = <0x20>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <49500000>;// 49.5 MHz (csb/4)
- bus-frequency = <198000000>; // 198 MHz csb bus
- clock-frequency = <396000000>; // 396 MHz ppc core
- };
- };
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x10000000>; // 256MB at 0
- };
-
- mbx@20000000 {
- compatible = "fsl,mpc5121-mbx";
- reg = <0x20000000 0x4000>;
- interrupts = <66 0x8>;
- interrupt-parent = < &ipic >;
- };
-
- sram@30000000 {
- compatible = "fsl,mpc5121-sram";
- reg = <0x30000000 0x20000>; // 128K at 0x30000000
- };
+ compatible = "fsl,mpc5121ads", "fsl,mpc5121";
nfc@40000000 {
- compatible = "fsl,mpc5121-nfc";
- reg = <0x40000000 0x100000>; // 1M at 0x40000000
- interrupts = <6 8>;
- interrupt-parent = < &ipic >;
- #address-cells = <1>;
- #size-cells = <1>;
- // ADS has two Hynix 512MB Nand flash chips in a single
- // stacked package.
+ /*
+ * ADS has two Hynix 512MB NAND flash chips in a single
+ * stacked package.
+ */
chips = <2>;
+
nand@0 {
label = "nand";
- reg = <0x00000000 0x40000000>; // 512MB + 512MB
+ reg = <0x00000000 0x40000000>; /* 512MB + 512MB */
};
};
localbus@80000020 {
- compatible = "fsl,mpc5121-localbus";
- #address-cells = <2>;
- #size-cells = <1>;
- reg = <0x80000020 0x40>;
-
ranges = <0x0 0x0 0xfc000000 0x04000000
0x2 0x0 0x82000000 0x00008000>;
@@ -87,6 +39,7 @@
#size-cells = <1>;
bank-width = <4>;
device-width = <2>;
+
protected@0 {
label = "protected";
reg = <0x00000000 0x00040000>; // first sector is protected
@@ -121,91 +74,18 @@
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x2 0xa 0x5>;
- interrupt-parent = < &ipic >;
- // irq routing
- // all irqs but touch screen are routed to irq0 (ipic 48)
- // touch screen is statically routed to irq1 (ipic 17)
- // so don't use it here
+ /* irq routing:
+ * all irqs but touch screen are routed to irq0 (ipic 48)
+ * touch screen is statically routed to irq1 (ipic 17)
+ * so don't use it here
+ */
interrupts = <48 0x8>;
};
};
soc@80000000 {
- compatible = "fsl,mpc5121-immr";
- #address-cells = <1>;
- #size-cells = <1>;
- #interrupt-cells = <2>;
- ranges = <0x0 0x80000000 0x400000>;
- reg = <0x80000000 0x400000>;
- bus-frequency = <66000000>; // 66 MHz ips bus
-
-
- // IPIC
- // interrupts cell = <intr #, sense>
- // sense values match linux IORESOURCE_IRQ_* defines:
- // sense == 8: Level, low assertion
- // sense == 2: Edge, high-to-low change
- //
- ipic: interrupt-controller@c00 {
- compatible = "fsl,mpc5121-ipic", "fsl,ipic";
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0xc00 0x100>;
- };
-
- rtc@a00 { // Real time clock
- compatible = "fsl,mpc5121-rtc";
- reg = <0xa00 0x100>;
- interrupts = <79 0x8 80 0x8>;
- interrupt-parent = < &ipic >;
- };
-
- reset@e00 { // Reset module
- compatible = "fsl,mpc5121-reset";
- reg = <0xe00 0x100>;
- };
-
- clock@f00 { // Clock control
- compatible = "fsl,mpc5121-clock";
- reg = <0xf00 0x100>;
- };
-
- pmc@1000{ //Power Management Controller
- compatible = "fsl,mpc5121-pmc";
- reg = <0x1000 0x100>;
- interrupts = <83 0x2>;
- interrupt-parent = < &ipic >;
- };
-
- gpio@1100 {
- compatible = "fsl,mpc5121-gpio";
- reg = <0x1100 0x100>;
- interrupts = <78 0x8>;
- interrupt-parent = < &ipic >;
- };
-
- can@1300 {
- compatible = "fsl,mpc5121-mscan";
- interrupts = <12 0x8>;
- interrupt-parent = < &ipic >;
- reg = <0x1300 0x80>;
- };
-
- can@1380 {
- compatible = "fsl,mpc5121-mscan";
- interrupts = <13 0x8>;
- interrupt-parent = < &ipic >;
- reg = <0x1380 0x80>;
- };
i2c@1700 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mpc5121-i2c", "fsl-i2c";
- reg = <0x1700 0x20>;
- interrupts = <9 0x8>;
- interrupt-parent = < &ipic >;
fsl,preserve-clocking;
hwmon@4a {
@@ -224,196 +104,75 @@
};
};
- i2c@1720 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mpc5121-i2c", "fsl-i2c";
- reg = <0x1720 0x20>;
- interrupts = <10 0x8>;
- interrupt-parent = < &ipic >;
- };
-
- i2c@1740 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mpc5121-i2c", "fsl-i2c";
- reg = <0x1740 0x20>;
- interrupts = <11 0x8>;
- interrupt-parent = < &ipic >;
+ eth0: ethernet@2800 {
+ phy-handle = <&phy0>;
};
- i2ccontrol@1760 {
- compatible = "fsl,mpc5121-i2c-ctrl";
- reg = <0x1760 0x8>;
+ can@2300 {
+ status = "disabled";
};
- axe@2000 {
- compatible = "fsl,mpc5121-axe";
- reg = <0x2000 0x100>;
- interrupts = <42 0x8>;
- interrupt-parent = < &ipic >;
+ can@2380 {
+ status = "disabled";
};
- display@2100 {
- compatible = "fsl,mpc5121-diu";
- reg = <0x2100 0x100>;
- interrupts = <64 0x8>;
- interrupt-parent = < &ipic >;
+ viu@2400 {
+ status = "disabled";
};
mdio@2800 {
- compatible = "fsl,mpc5121-fec-mdio";
- reg = <0x2800 0x800>;
- #address-cells = <1>;
- #size-cells = <0>;
- phy: ethernet-phy@0 {
+ phy0: ethernet-phy@0 {
reg = <1>;
- device_type = "ethernet-phy";
};
};
- ethernet@2800 {
- device_type = "network";
- compatible = "fsl,mpc5121-fec";
- reg = <0x2800 0x800>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <4 0x8>;
- interrupt-parent = < &ipic >;
- phy-handle = < &phy >;
- fsl,align-tx-packets = <4>;
+ /* mpc5121ads only uses USB0 */
+ usb@3000 {
+ status = "disabled";
};
- // 5121e has two dr usb modules
- // mpc5121_ads only uses USB0
-
- // USB1 using external ULPI PHY
- //usb@3000 {
- // compatible = "fsl,mpc5121-usb2-dr";
- // reg = <0x3000 0x1000>;
- // #address-cells = <1>;
- // #size-cells = <0>;
- // interrupt-parent = < &ipic >;
- // interrupts = <43 0x8>;
- // dr_mode = "otg";
- // phy_type = "ulpi";
- //};
-
- // USB0 using internal UTMI PHY
+ /* USB0 using internal UTMI PHY */
usb@4000 {
- compatible = "fsl,mpc5121-usb2-dr";
- reg = <0x4000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = < &ipic >;
- interrupts = <44 0x8>;
- dr_mode = "otg";
- phy_type = "utmi_wide";
+ dr_mode = "host";
fsl,invert-drvvbus;
fsl,invert-pwr-fault;
};
- // IO control
- ioctl@a000 {
- compatible = "fsl,mpc5121-ioctl";
- reg = <0xA000 0x1000>;
- };
-
- pata@10200 {
- compatible = "fsl,mpc5121-pata";
- reg = <0x10200 0x100>;
- interrupts = <5 0x8>;
- interrupt-parent = < &ipic >;
- };
-
- // 512x PSCs are not 52xx PSC compatible
- // PSC3 serial port A aka ttyPSC0
- serial@11300 {
- device_type = "serial";
+ /* PSC3 serial port A aka ttyPSC0 */
+ psc@11300 {
compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
- // Logical port assignment needed until driver
- // learns to use aliases
- port-number = <0>;
- cell-index = <3>;
- reg = <0x11300 0x100>;
- interrupts = <40 0x8>;
- interrupt-parent = < &ipic >;
- rx-fifo-size = <16>;
- tx-fifo-size = <16>;
};
- // PSC4 serial port B aka ttyPSC1
- serial@11400 {
- device_type = "serial";
+ /* PSC4 serial port B aka ttyPSC1 */
+ psc@11400 {
compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
- // Logical port assignment needed until driver
- // learns to use aliases
- port-number = <1>;
- cell-index = <4>;
- reg = <0x11400 0x100>;
- interrupts = <40 0x8>;
- interrupt-parent = < &ipic >;
- rx-fifo-size = <16>;
- tx-fifo-size = <16>;
};
- // PSC5 in ac97 mode
- ac97@11500 {
+ /* PSC5 in ac97 mode */
+ ac97: psc@11500 {
compatible = "fsl,mpc5121-psc-ac97", "fsl,mpc5121-psc";
- cell-index = <5>;
- reg = <0x11500 0x100>;
- interrupts = <40 0x8>;
- interrupt-parent = < &ipic >;
fsl,mode = "ac97-slave";
- rx-fifo-size = <384>;
- tx-fifo-size = <384>;
- };
-
- pscfifo@11f00 {
- compatible = "fsl,mpc5121-psc-fifo";
- reg = <0x11f00 0x100>;
- interrupts = <40 0x8>;
- interrupt-parent = < &ipic >;
+ fsl,rx-fifo-size = <384>;
+ fsl,tx-fifo-size = <384>;
};
-
- dma@14000 {
- compatible = "fsl,mpc5121-dma";
- reg = <0x14000 0x1800>;
- interrupts = <65 0x8>;
- interrupt-parent = < &ipic >;
- };
-
};
pci: pci@80008500 {
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
- // IDSEL 0x15 - Slot 1 PCI
+ /* IDSEL 0x15 - Slot 1 PCI */
0xa800 0x0 0x0 0x1 &cpld_pic 0x0 0x8
0xa800 0x0 0x0 0x2 &cpld_pic 0x1 0x8
0xa800 0x0 0x0 0x3 &cpld_pic 0x2 0x8
0xa800 0x0 0x0 0x4 &cpld_pic 0x3 0x8
- // IDSEL 0x16 - Slot 2 MiniPCI
+ /* IDSEL 0x16 - Slot 2 MiniPCI */
0xb000 0x0 0x0 0x1 &cpld_pic 0x4 0x8
0xb000 0x0 0x0 0x2 &cpld_pic 0x5 0x8
- // IDSEL 0x17 - Slot 3 MiniPCI
+ /* IDSEL 0x17 - Slot 3 MiniPCI */
0xb800 0x0 0x0 0x1 &cpld_pic 0x6 0x8
0xb800 0x0 0x0 0x2 &cpld_pic 0x7 0x8
>;
- interrupt-parent = < &ipic >;
- interrupts = <1 0x8>;
- bus-range = <0 0>;
- ranges = <0x42000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
- 0x02000000 0x0 0xb0000000 0xb0000000 0x0 0x10000000
- 0x01000000 0x0 0x00000000 0x84000000 0x0 0x01000000>;
- clock-frequency = <0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0x80008500 0x100 /* internal registers */
- 0x80008300 0x8>; /* config space access registers */
- compatible = "fsl,mpc5121-pci";
- device_type = "pci";
};
};
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
new file mode 100644
index 0000000..4177b62
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -0,0 +1,233 @@
+/*
+ * STx/Freescale ADS5125 MPC5125 silicon
+ *
+ * Copyright (C) 2009 Freescale Semiconductor Inc. All rights reserved.
+ *
+ * Reworked by Matteo Facchinetti (engineering@sirius-es.it)
+ * Copyright (C) 2013 Sirius Electronic Systems
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+
+/ {
+ model = "mpc5125twr"; // In BSP "mpc5125ads"
+ compatible = "fsl,mpc5125ads", "fsl,mpc5125";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&ipic>;
+
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ ethernet0 = &eth0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,5125@0 {
+ device_type = "cpu";
+ reg = <0>;
+ d-cache-line-size = <0x20>; // 32 bytes
+ i-cache-line-size = <0x20>; // 32 bytes
+ d-cache-size = <0x8000>; // L1, 32K
+ i-cache-size = <0x8000>; // L1, 32K
+ timebase-frequency = <49500000>;// 49.5 MHz (csb/4)
+ bus-frequency = <198000000>; // 198 MHz csb bus
+ clock-frequency = <396000000>; // 396 MHz ppc core
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>; // 256MB at 0
+ };
+
+ sram@30000000 {
+ compatible = "fsl,mpc5121-sram";
+ reg = <0x30000000 0x08000>; // 32K at 0x30000000
+ };
+
+ soc@80000000 {
+ compatible = "fsl,mpc5121-immr";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #interrupt-cells = <2>;
+ ranges = <0x0 0x80000000 0x400000>;
+ reg = <0x80000000 0x400000>;
+ bus-frequency = <66000000>; // 66 MHz ips bus
+
+ // IPIC
+ // interrupts cell = <intr #, sense>
+ // sense values match linux IORESOURCE_IRQ_* defines:
+ // sense == 8: Level, low assertion
+ // sense == 2: Edge, high-to-low change
+ //
+ ipic: interrupt-controller@c00 {
+ compatible = "fsl,mpc5121-ipic", "fsl,ipic";
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0xc00 0x100>;
+ };
+
+ rtc@a00 { // Real time clock
+ compatible = "fsl,mpc5121-rtc";
+ reg = <0xa00 0x100>;
+ interrupts = <79 0x8 80 0x8>;
+ };
+
+ reset@e00 { // Reset module
+ compatible = "fsl,mpc5125-reset";
+ reg = <0xe00 0x100>;
+ };
+
+ clock@f00 { // Clock control
+ compatible = "fsl,mpc5121-clock";
+ reg = <0xf00 0x100>;
+ };
+
+ pmc@1000{ // Power Management Controller
+ compatible = "fsl,mpc5121-pmc";
+ reg = <0x1000 0x100>;
+ interrupts = <83 0x2>;
+ };
+
+ gpio0: gpio@1100 {
+ compatible = "fsl,mpc5125-gpio";
+ reg = <0x1100 0x080>;
+ interrupts = <78 0x8>;
+ };
+
+ gpio1: gpio@1180 {
+ compatible = "fsl,mpc5125-gpio";
+ reg = <0x1180 0x080>;
+ interrupts = <86 0x8>;
+ };
+
+ can@1300 { // CAN rev.2
+ compatible = "fsl,mpc5121-mscan";
+ interrupts = <12 0x8>;
+ reg = <0x1300 0x80>;
+ };
+
+ can@1380 {
+ compatible = "fsl,mpc5121-mscan";
+ interrupts = <13 0x8>;
+ reg = <0x1380 0x80>;
+ };
+
+ sdhc@1500 {
+ compatible = "fsl,mpc5121-sdhc";
+ interrupts = <8 0x8>;
+ reg = <0x1500 0x100>;
+ };
+
+ i2c@1700 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc5121-i2c", "fsl-i2c";
+ reg = <0x1700 0x20>;
+ interrupts = <0x9 0x8>;
+ };
+
+ i2c@1720 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc5121-i2c", "fsl-i2c";
+ reg = <0x1720 0x20>;
+ interrupts = <0xa 0x8>;
+ };
+
+ i2c@1740 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc5121-i2c", "fsl-i2c";
+ reg = <0x1740 0x20>;
+ interrupts = <0xb 0x8>;
+ };
+
+ i2ccontrol@1760 {
+ compatible = "fsl,mpc5121-i2c-ctrl";
+ reg = <0x1760 0x8>;
+ };
+
+ diu@2100 {
+ compatible = "fsl,mpc5121-diu";
+ reg = <0x2100 0x100>;
+ interrupts = <64 0x8>;
+ };
+
+ mdio@2800 {
+ compatible = "fsl,mpc5121-fec-mdio";
+ reg = <0x2800 0x800>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@0 {
+ reg = <1>;
+ };
+ };
+
+ eth0: ethernet@2800 {
+ compatible = "fsl,mpc5125-fec";
+ reg = <0x2800 0x800>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <4 0x8>;
+ phy-handle = < &phy0 >;
+ phy-connection-type = "rmii";
+ };
+
+ // IO control
+ ioctl@a000 {
+ compatible = "fsl,mpc5125-ioctl";
+ reg = <0xA000 0x1000>;
+ };
+
+ usb@3000 {
+ compatible = "fsl,mpc5121-usb2-dr";
+ reg = <0x3000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <43 0x8>;
+ dr_mode = "host";
+ phy_type = "ulpi";
+ };
+
+ // 5125 PSCs are not 52xx or 5121 PSC compatible
+ // PSC1 uart0 aka ttyPSC0
+ serial@11100 {
+ compatible = "fsl,mpc5125-psc-uart", "fsl,mpc5125-psc";
+ reg = <0x11100 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ // PSC9 uart1 aka ttyPSC1
+ serial@11900 {
+ compatible = "fsl,mpc5125-psc-uart", "fsl,mpc5125-psc";
+ reg = <0x11900 0x100>;
+ interrupts = <40 0x8>;
+ fsl,rx-fifo-size = <16>;
+ fsl,tx-fifo-size = <16>;
+ };
+
+ pscfifo@11f00 {
+ compatible = "fsl,mpc5121-psc-fifo";
+ reg = <0x11f00 0x100>;
+ interrupts = <40 0x8>;
+ };
+
+ dma@14000 {
+ compatible = "fsl,mpc5121-dma"; // BSP name: "mpc512x-dma2"
+ reg = <0x14000 0x1800>;
+ interrupts = <65 0x8>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc5200b.dtsi b/arch/powerpc/boot/dts/mpc5200b.dtsi
index 39ed65a..969b220 100644
--- a/arch/powerpc/boot/dts/mpc5200b.dtsi
+++ b/arch/powerpc/boot/dts/mpc5200b.dtsi
@@ -64,50 +64,59 @@
reg = <0x500 0x80>;
};
- timer@600 { // General Purpose Timer
+ gpt0: timer@600 { // General Purpose Timer
compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode
reg = <0x600 0x10>;
interrupts = <1 9 0>;
+ // add 'fsl,has-wdt' to enable watchdog
};
- timer@610 { // General Purpose Timer
+ gpt1: timer@610 { // General Purpose Timer
compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode
reg = <0x610 0x10>;
interrupts = <1 10 0>;
};
- timer@620 { // General Purpose Timer
+ gpt2: timer@620 { // General Purpose Timer
compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode
reg = <0x620 0x10>;
interrupts = <1 11 0>;
};
- timer@630 { // General Purpose Timer
+ gpt3: timer@630 { // General Purpose Timer
compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode
reg = <0x630 0x10>;
interrupts = <1 12 0>;
};
- timer@640 { // General Purpose Timer
+ gpt4: timer@640 { // General Purpose Timer
compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode
reg = <0x640 0x10>;
interrupts = <1 13 0>;
};
- timer@650 { // General Purpose Timer
+ gpt5: timer@650 { // General Purpose Timer
compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode
reg = <0x650 0x10>;
interrupts = <1 14 0>;
};
- timer@660 { // General Purpose Timer
+ gpt6: timer@660 { // General Purpose Timer
compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode
reg = <0x660 0x10>;
interrupts = <1 15 0>;
};
- timer@670 { // General Purpose Timer
+ gpt7: timer@670 { // General Purpose Timer
compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+ #gpio-cells = <2>; // Add 'gpio-controller;' to enable gpio mode
reg = <0x670 0x10>;
interrupts = <1 16 0>;
};
diff --git a/arch/powerpc/boot/dts/mpc8536ds_36b.dts b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
index f8a3b34..6c723ee 100644
--- a/arch/powerpc/boot/dts/mpc8536ds_36b.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
@@ -32,7 +32,7 @@
reg = <0 0 0 0>; // Filled by U-Boot
};
- lbc: localbus@ffe05000 {
+ lbc: localbus@fffe05000 {
reg = <0xf 0xffe05000 0 0x1000>;
ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
@@ -44,7 +44,7 @@
ranges = <0x0 0xf 0xffe00000 0x100000>;
};
- pci0: pci@ffe08000 {
+ pci0: pci@fffe08000 {
reg = <0xf 0xffe08000 0 0x1000>;
ranges = <0x02000000 0 0xf0000000 0xc 0x00000000 0 0x10000000
0x01000000 0 0x00000000 0xf 0xffc00000 0 0x00010000>;
@@ -59,7 +59,7 @@
0x8800 0 0 4 &mpic 4 1 0 0>;
};
- pci1: pcie@ffe09000 {
+ pci1: pcie@fffe09000 {
reg = <0xf 0xffe09000 0 0x1000>;
ranges = <0x02000000 0 0xf8000000 0xc 0x18000000 0 0x08000000
0x01000000 0 0x00000000 0xf 0xffc20000 0 0x00010000>;
diff --git a/arch/powerpc/boot/dts/mucmc52.dts b/arch/powerpc/boot/dts/mucmc52.dts
index 21d3472..d3a792b 100644
--- a/arch/powerpc/boot/dts/mucmc52.dts
+++ b/arch/powerpc/boot/dts/mucmc52.dts
@@ -13,47 +13,23 @@
/include/ "mpc5200b.dtsi"
+/* Timer pins that need to be in GPIO mode */
+&gpt0 { gpio-controller; };
+&gpt1 { gpio-controller; };
+&gpt2 { gpio-controller; };
+&gpt3 { gpio-controller; };
+
+/* Disabled timers */
+&gpt4 { status = "disabled"; };
+&gpt5 { status = "disabled"; };
+&gpt6 { status = "disabled"; };
+&gpt7 { status = "disabled"; };
+
/ {
model = "manroland,mucmc52";
compatible = "manroland,mucmc52";
soc5200@f0000000 {
- gpt0: timer@600 { // GPT 0 in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt1: timer@610 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt2: timer@620 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt3: timer@630 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- timer@640 {
- status = "disabled";
- };
-
- timer@650 {
- status = "disabled";
- };
-
- timer@660 {
- status = "disabled";
- };
-
- timer@670 {
- status = "disabled";
- };
-
rtc@800 {
status = "disabled";
};
diff --git a/arch/powerpc/boot/dts/o2d.dtsi b/arch/powerpc/boot/dts/o2d.dtsi
index 24f6680..cf073e6 100644
--- a/arch/powerpc/boot/dts/o2d.dtsi
+++ b/arch/powerpc/boot/dts/o2d.dtsi
@@ -12,6 +12,13 @@
/include/ "mpc5200b.dtsi"
+&gpt0 {
+ gpio-controller;
+ fsl,has-wdt;
+ fsl,wdt-on-boot = <0>;
+};
+&gpt1 { gpio-controller; };
+
/ {
model = "ifm,o2d";
compatible = "ifm,o2d";
@@ -22,24 +29,6 @@
soc5200@f0000000 {
- gpio_simple: gpio@b00 {
- };
-
- timer@600 { // General Purpose Timer
- #gpio-cells = <2>;
- gpio-controller;
- fsl,has-wdt;
- fsl,wdt-on-boot = <0>;
- };
-
- timer@610 {
- #gpio-cells = <2>;
- gpio-controller;
- };
-
- timer7: timer@670 {
- };
-
rtc@800 {
status = "disabled";
};
@@ -118,7 +107,7 @@
csi@3,0 {
compatible = "ifm,o2d-csi";
reg = <3 0 0x00100000>;
- ifm,csi-clk-handle = <&timer7>;
+ ifm,csi-clk-handle = <&gpt7>;
gpios = <&gpio_simple 23 0 /* imag_capture */
&gpio_simple 26 0 /* imag_reset */
&gpio_simple 29 0>; /* imag_master_en */
diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts
index 96512c0..192e66a 100644
--- a/arch/powerpc/boot/dts/pcm030.dts
+++ b/arch/powerpc/boot/dts/pcm030.dts
@@ -14,51 +14,19 @@
/include/ "mpc5200b.dtsi"
+&gpt0 { fsl,has-wdt; };
+&gpt2 { gpio-controller; };
+&gpt3 { gpio-controller; };
+&gpt4 { gpio-controller; };
+&gpt5 { gpio-controller; };
+&gpt6 { gpio-controller; };
+&gpt7 { gpio-controller; };
+
/ {
model = "phytec,pcm030";
compatible = "phytec,pcm030";
soc5200@f0000000 {
- timer@600 { // General Purpose Timer
- fsl,has-wdt;
- };
-
- gpt2: timer@620 { // General Purpose Timer in GPIO mode
- compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt3: timer@630 { // General Purpose Timer in GPIO mode
- compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt4: timer@640 { // General Purpose Timer in GPIO mode
- compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt5: timer@650 { // General Purpose Timer in GPIO mode
- compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt6: timer@660 { // General Purpose Timer in GPIO mode
- compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt7: timer@670 { // General Purpose Timer in GPIO mode
- compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
- gpio-controller;
- #gpio-cells = <2>;
- };
-
audioplatform: psc@2000 { /* PSC1 in ac97 mode */
compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97";
cell-index = <0>;
diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts
index 1dd478b..96b139b 100644
--- a/arch/powerpc/boot/dts/pcm032.dts
+++ b/arch/powerpc/boot/dts/pcm032.dts
@@ -14,6 +14,14 @@
/include/ "mpc5200b.dtsi"
+&gpt0 { fsl,has-wdt; };
+&gpt2 { gpio-controller; };
+&gpt3 { gpio-controller; };
+&gpt4 { gpio-controller; };
+&gpt5 { gpio-controller; };
+&gpt6 { gpio-controller; };
+&gpt7 { gpio-controller; };
+
/ {
model = "phytec,pcm032";
compatible = "phytec,pcm032";
@@ -23,43 +31,6 @@
};
soc5200@f0000000 {
- timer@600 { // General Purpose Timer
- fsl,has-wdt;
- };
-
- gpt2: timer@620 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt3: timer@630 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt4: timer@640 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt5: timer@650 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt6: timer@660 { // General Purpose Timer in GPIO mode
- compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
- reg = <0x660 0x10>;
- interrupts = <1 15 0>;
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt7: timer@670 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
psc@2000 { /* PSC1 is ac97 */
compatible = "fsl,mpc5200b-psc-ac97","fsl,mpc5200-psc-ac97";
cell-index = <0>;
diff --git a/arch/powerpc/boot/dts/pdm360ng.dts b/arch/powerpc/boot/dts/pdm360ng.dts
index 94dfa5c..7433740 100644
--- a/arch/powerpc/boot/dts/pdm360ng.dts
+++ b/arch/powerpc/boot/dts/pdm360ng.dts
@@ -13,47 +13,21 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "mpc5121.dtsi"
/ {
model = "pdm360ng";
- compatible = "ifm,pdm360ng";
+ compatible = "ifm,pdm360ng", "fsl,mpc5121";
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&ipic>;
- aliases {
- ethernet0 = &eth0;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,5121@0 {
- device_type = "cpu";
- reg = <0>;
- d-cache-line-size = <0x20>; // 32 bytes
- i-cache-line-size = <0x20>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <49500000>;// 49.5 MHz (csb/4)
- bus-frequency = <198000000>; // 198 MHz csb bus
- clock-frequency = <396000000>; // 396 MHz ppc core
- };
- };
-
memory {
device_type = "memory";
reg = <0x00000000 0x20000000>; // 512MB at 0
};
nfc@40000000 {
- compatible = "fsl,mpc5121-nfc";
- reg = <0x40000000 0x100000>;
- interrupts = <0x6 0x8>;
- #address-cells = <0x1>;
- #size-cells = <0x1>;
bank-width = <0x1>;
chips = <0x1>;
@@ -63,17 +37,7 @@
};
};
- sram@50000000 {
- compatible = "fsl,mpc5121-sram";
- reg = <0x50000000 0x20000>; // 128K at 0x50000000
- };
-
localbus@80000020 {
- compatible = "fsl,mpc5121-localbus";
- #address-cells = <2>;
- #size-cells = <1>;
- reg = <0x80000020 0x40>;
-
ranges = <0x0 0x0 0xf0000000 0x10000000 /* Flash */
0x2 0x0 0x50040000 0x00020000>; /* CS2: MRAM */
@@ -129,74 +93,8 @@
};
soc@80000000 {
- compatible = "fsl,mpc5121-immr";
- #address-cells = <1>;
- #size-cells = <1>;
- #interrupt-cells = <2>;
- ranges = <0x0 0x80000000 0x400000>;
- reg = <0x80000000 0x400000>;
- bus-frequency = <66000000>; // 66 MHz ips bus
-
- // IPIC
- // interrupts cell = <intr #, sense>
- // sense values match linux IORESOURCE_IRQ_* defines:
- // sense == 8: Level, low assertion
- // sense == 2: Edge, high-to-low change
- //
- ipic: interrupt-controller@c00 {
- compatible = "fsl,mpc5121-ipic", "fsl,ipic";
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0xc00 0x100>;
- };
-
- rtc@a00 { // Real time clock
- compatible = "fsl,mpc5121-rtc";
- reg = <0xa00 0x100>;
- interrupts = <79 0x8 80 0x8>;
- };
-
- reset@e00 { // Reset module
- compatible = "fsl,mpc5121-reset";
- reg = <0xe00 0x100>;
- };
-
- clock@f00 { // Clock control
- compatible = "fsl,mpc5121-clock";
- reg = <0xf00 0x100>;
- };
-
- pmc@1000{ //Power Management Controller
- compatible = "fsl,mpc5121-pmc";
- reg = <0x1000 0x100>;
- interrupts = <83 0x2>;
- };
-
- gpio@1100 {
- compatible = "fsl,mpc5121-gpio";
- reg = <0x1100 0x100>;
- interrupts = <78 0x8>;
- };
-
- can@1300 {
- compatible = "fsl,mpc5121-mscan";
- interrupts = <12 0x8>;
- reg = <0x1300 0x80>;
- };
-
- can@1380 {
- compatible = "fsl,mpc5121-mscan";
- interrupts = <13 0x8>;
- reg = <0x1380 0x80>;
- };
i2c@1700 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mpc5121-i2c";
- reg = <0x1700 0x20>;
- interrupts = <0x9 0x8>;
fsl,preserve-clocking;
eeprom@50 {
@@ -210,201 +108,92 @@
};
};
- i2c@1740 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mpc5121-i2c";
- reg = <0x1740 0x20>;
- interrupts = <0xb 0x8>;
- fsl,preserve-clocking;
- };
-
- i2ccontrol@1760 {
- compatible = "fsl,mpc5121-i2c-ctrl";
- reg = <0x1760 0x8>;
- };
-
- axe@2000 {
- compatible = "fsl,mpc5121-axe";
- reg = <0x2000 0x100>;
- interrupts = <42 0x8>;
- };
-
- display@2100 {
- compatible = "fsl,mpc5121-diu";
- reg = <0x2100 0x100>;
- interrupts = <64 0x8>;
+ i2c@1720 {
+ status = "disabled";
};
- can@2300 {
- compatible = "fsl,mpc5121-mscan";
- interrupts = <90 0x8>;
- reg = <0x2300 0x80>;
- };
-
- can@2380 {
- compatible = "fsl,mpc5121-mscan";
- interrupts = <91 0x8>;
- reg = <0x2380 0x80>;
+ i2c@1740 {
+ fsl,preserve-clocking;
};
- viu@2400 {
- compatible = "fsl,mpc5121-viu";
- reg = <0x2400 0x400>;
- interrupts = <67 0x8>;
+ ethernet@2800 {
+ phy-handle = <&phy0>;
};
mdio@2800 {
- compatible = "fsl,mpc5121-fec-mdio";
- reg = <0x2800 0x200>;
- #address-cells = <1>;
- #size-cells = <0>;
- phy: ethernet-phy@0 {
+ phy0: ethernet-phy@1f {
compatible = "smsc,lan8700";
reg = <0x1f>;
};
};
- eth0: ethernet@2800 {
- compatible = "fsl,mpc5121-fec";
- reg = <0x2800 0x200>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <4 0x8>;
- phy-handle = < &phy >;
- };
-
- // USB1 using external ULPI PHY
+ /* USB1 using external ULPI PHY */
usb@3000 {
- compatible = "fsl,mpc5121-usb2-dr";
- reg = <0x3000 0x600>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <43 0x8>;
dr_mode = "host";
- phy_type = "ulpi";
};
- // USB0 using internal UTMI PHY
+ /* USB0 using internal UTMI PHY */
usb@4000 {
- compatible = "fsl,mpc5121-usb2-dr";
- reg = <0x4000 0x600>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <44 0x8>;
- dr_mode = "otg";
- phy_type = "utmi_wide";
fsl,invert-pwr-fault;
};
- // IO control
- ioctl@a000 {
- compatible = "fsl,mpc5121-ioctl";
- reg = <0xA000 0x1000>;
- };
-
- // 512x PSCs are not 52xx PSCs compatible
- serial@11000 {
+ psc@11000 {
compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
- cell-index = <0>;
- reg = <0x11000 0x100>;
- interrupts = <40 0x8>;
- fsl,rx-fifo-size = <16>;
- fsl,tx-fifo-size = <16>;
};
- serial@11100 {
+ psc@11100 {
compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
- cell-index = <1>;
- reg = <0x11100 0x100>;
- interrupts = <40 0x8>;
- fsl,rx-fifo-size = <16>;
- fsl,tx-fifo-size = <16>;
};
- serial@11200 {
+ psc@11200 {
compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
- cell-index = <2>;
- reg = <0x11200 0x100>;
- interrupts = <40 0x8>;
- fsl,rx-fifo-size = <16>;
- fsl,tx-fifo-size = <16>;
};
- serial@11300 {
+ psc@11300 {
compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
- cell-index = <3>;
- reg = <0x11300 0x100>;
- interrupts = <40 0x8>;
- fsl,rx-fifo-size = <16>;
- fsl,tx-fifo-size = <16>;
};
- serial@11400 {
+ psc@11400 {
compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
- cell-index = <4>;
- reg = <0x11400 0x100>;
- interrupts = <40 0x8>;
- fsl,rx-fifo-size = <16>;
- fsl,tx-fifo-size = <16>;
};
- serial@11600 {
- compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
- cell-index = <6>;
- reg = <0x11600 0x100>;
- interrupts = <40 0x8>;
- fsl,rx-fifo-size = <16>;
- fsl,tx-fifo-size = <16>;
+ psc@11500 {
+ status = "disabled";
};
- serial@11800 {
+ psc@11600 {
compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
- cell-index = <8>;
- reg = <0x11800 0x100>;
- interrupts = <40 0x8>;
- fsl,rx-fifo-size = <16>;
- fsl,tx-fifo-size = <16>;
};
- serial@11B00 {
- compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
- cell-index = <11>;
- reg = <0x11B00 0x100>;
- interrupts = <40 0x8>;
- fsl,rx-fifo-size = <16>;
- fsl,tx-fifo-size = <16>;
+ psc@11700 {
+ status = "disabled";
};
- pscfifo@11f00 {
- compatible = "fsl,mpc5121-psc-fifo";
- reg = <0x11f00 0x100>;
- interrupts = <40 0x8>;
+ psc@11800 {
+ compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
};
- spi@11900 {
+ psc@11900 {
compatible = "fsl,mpc5121-psc-spi", "fsl,mpc5121-psc";
- cell-index = <9>;
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x11900 0x100>;
- interrupts = <40 0x8>;
- fsl,rx-fifo-size = <16>;
- fsl,tx-fifo-size = <16>;
- // 7845 touch screen controller
+ /* ADS7845 touch screen controller */
ts@0 {
compatible = "ti,ads7846";
reg = <0x0>;
spi-max-frequency = <3000000>;
- // pen irq is GPIO25
+ /* pen irq is GPIO25 */
interrupts = <78 0x8>;
};
};
- dma@14000 {
- compatible = "fsl,mpc5121-dma";
- reg = <0x14000 0x1800>;
- interrupts = <65 0x8>;
+ psc@11a00 {
+ status = "disabled";
+ };
+
+ psc@11b00 {
+ compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
};
};
};
diff --git a/arch/powerpc/boot/dts/ppa8548.dts b/arch/powerpc/boot/dts/ppa8548.dts
new file mode 100644
index 0000000..f97ecee
--- /dev/null
+++ b/arch/powerpc/boot/dts/ppa8548.dts
@@ -0,0 +1,166 @@
+/*
+ * PPA8548 Device Tree Source (36-bit address map)
+ * Copyright 2013 Prodrive B.V.
+ *
+ * Based on:
+ * MPC8548 CDS Device Tree Source (36-bit address map)
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "fsl/mpc8548si-pre.dtsi"
+
+/ {
+ model = "ppa8548";
+ compatible = "ppa8548";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ memory {
+ device_type = "memory";
+ reg = <0 0 0x0 0x40000000>;
+ };
+
+ lbc: localbus@fe0005000 {
+ reg = <0xf 0xe0005000 0 0x1000>;
+ ranges = <0x0 0x0 0xf 0xff800000 0x00800000>;
+ };
+
+ soc: soc8548@fe0000000 {
+ ranges = <0 0xf 0xe0000000 0x100000>;
+ };
+
+ pci0: pci@fe0008000 {
+ /* ppa8548 board doesn't support PCI */
+ status = "disabled";
+ };
+
+ pci1: pci@fe0009000 {
+ /* ppa8548 board doesn't support PCI */
+ status = "disabled";
+ };
+
+ pci2: pcie@fe000a000 {
+ /* ppa8548 board doesn't support PCI */
+ status = "disabled";
+ };
+
+ rio: rapidio@fe00c0000 {
+ reg = <0xf 0xe00c0000 0x0 0x11000>;
+ port1 {
+ ranges = <0x0 0x0 0x0 0x80000000 0x0 0x40000000>;
+ };
+ };
+};
+
+&lbc {
+ nor@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x00800000>;
+ bank-width = <2>;
+ device-width = <2>;
+
+ partition@0 {
+ reg = <0x0 0x7A0000>;
+ label = "user";
+ };
+
+ partition@7A0000 {
+ reg = <0x7A0000 0x20000>;
+ label = "env";
+ read-only;
+ };
+
+ partition@7C0000 {
+ reg = <0x7C0000 0x40000>;
+ label = "u-boot";
+ read-only;
+ };
+ };
+};
+
+&soc {
+ i2c@3000 {
+ rtc@6f {
+ compatible = "intersil,isl1208";
+ reg = <0x6f>;
+ };
+ };
+
+ i2c@3100 {
+ };
+
+ /*
+ * Only ethernet controllers @25000 and @26000 are used.
+ * Use aliases enet2 and enet3 for the remaining controllers,
+ * to stay compatible with mpc8548si-pre.dtsi.
+ */
+ enet2: ethernet@24000 {
+ status = "disabled";
+ };
+
+ mdio@24520 {
+ phy0: ethernet-phy@0 {
+ interrupts = <7 1 0 0>;
+ reg = <0x0>;
+ device_type = "ethernet-phy";
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <8 1 0 0>;
+ reg = <0x1>;
+ device_type = "ethernet-phy";
+ };
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@25000 {
+ tbi-handle = <&tbi1>;
+ phy-handle = <&phy0>;
+ };
+
+ mdio@25520 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet1: ethernet@26000 {
+ tbi-handle = <&tbi2>;
+ phy-handle = <&phy1>;
+ };
+
+ mdio@26520 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet3: ethernet@27000 {
+ status = "disabled";
+ };
+
+ mdio@27520 {
+ tbi3: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ crypto@30000 {
+ status = "disabled";
+ };
+};
+
+/include/ "fsl/mpc8548si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/sbc8548-altflash.dts b/arch/powerpc/boot/dts/sbc8548-altflash.dts
new file mode 100644
index 0000000..0b38a0d
--- /dev/null
+++ b/arch/powerpc/boot/dts/sbc8548-altflash.dts
@@ -0,0 +1,115 @@
+/*
+ * SBC8548 Device Tree Source
+ *
+ * Configured for booting off the alternate (64MB SODIMM) flash.
+ * Requires switching JP12 jumpers and changing SW2.8 setting.
+ *
+ * Copyright 2013 Wind River Systems Inc.
+ *
+ * Paul Gortmaker (see MAINTAINERS for contact information)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+
+/dts-v1/;
+
+/include/ "sbc8548-pre.dtsi"
+
+/{
+ localbus@e0000000 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ reg = <0xe0000000 0x5000>;
+ interrupt-parent = <&mpic>;
+
+ ranges = <0x0 0x0 0xfc000000 0x04000000 /*64MB Flash*/
+ 0x3 0x0 0xf0000000 0x04000000 /*64MB SDRAM*/
+ 0x4 0x0 0xf4000000 0x04000000 /*64MB SDRAM*/
+ 0x5 0x0 0xf8000000 0x00b10000 /* EPLD */
+ 0x6 0x0 0xef800000 0x00800000>; /*8MB Flash*/
+
+ flash@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0 0x0 0x04000000>;
+ compatible = "intel,JS28F128", "cfi-flash";
+ bank-width = <4>;
+ device-width = <1>;
+ partition@0x0 {
+ label = "space";
+ /* FC000000 -> FFEFFFFF */
+ reg = <0x00000000 0x03f00000>;
+ };
+ partition@0x03f00000 {
+ label = "bootloader";
+ /* FFF00000 -> FFFFFFFF */
+ reg = <0x03f00000 0x00100000>;
+ read-only;
+ };
+ };
+
+
+ epld@5,0 {
+ compatible = "wrs,epld-localbus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ reg = <0x5 0x0 0x00b10000>;
+ ranges = <
+ 0x0 0x0 0x5 0x000000 0x1fff /* LED */
+ 0x1 0x0 0x5 0x100000 0x1fff /* Switches */
+ 0x3 0x0 0x5 0x300000 0x1fff /* HW Rev. */
+ 0xb 0x0 0x5 0xb00000 0x1fff /* EEPROM */
+ >;
+
+ led@0,0 {
+ compatible = "led";
+ reg = <0x0 0x0 0x1fff>;
+ };
+
+ switches@1,0 {
+ compatible = "switches";
+ reg = <0x1 0x0 0x1fff>;
+ };
+
+ hw-rev@3,0 {
+ compatible = "hw-rev";
+ reg = <0x3 0x0 0x1fff>;
+ };
+
+ eeprom@b,0 {
+ compatible = "eeprom";
+ reg = <0xb 0 0x1fff>;
+ };
+
+ };
+
+ alt-flash@6,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "intel,JS28F640", "cfi-flash";
+ reg = <0x6 0x0 0x800000>;
+ bank-width = <1>;
+ device-width = <1>;
+ partition@0x0 {
+ label = "space";
+ /* EF800000 -> EFF9FFFF */
+ reg = <0x00000000 0x007a0000>;
+ };
+ partition@0x7a0000 {
+ label = "bootloader";
+ /* EFFA0000 -> EFFFFFFF */
+ reg = <0x007a0000 0x00060000>;
+ read-only;
+ };
+ };
+
+
+ };
+};
+
+/include/ "sbc8548-post.dtsi"
diff --git a/arch/powerpc/boot/dts/sbc8548-post.dtsi b/arch/powerpc/boot/dts/sbc8548-post.dtsi
new file mode 100644
index 0000000..33a47e2
--- /dev/null
+++ b/arch/powerpc/boot/dts/sbc8548-post.dtsi
@@ -0,0 +1,295 @@
+/*
+ * SBC8548 Device Tree Source
+ *
+ * Copyright 2007 Wind River Systems Inc.
+ *
+ * Paul Gortmaker (see MAINTAINERS for contact information)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/{
+ soc8548@e0000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ ranges = <0x00000000 0xe0000000 0x00100000>;
+ bus-frequency = <0>;
+ compatible = "simple-bus";
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <10>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,mpc8548-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <17 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,mpc8548-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <0x12 0x2>;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,mpc8548-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <0x20>; // 32 bytes
+ cache-size = <0x80000>; // L2, 512K
+ interrupt-parent = <&mpic>;
+ interrupts = <0x10 0x2>;
+ };
+
+ i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <0x2b 0x2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <0x2b 0x2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ dma@21300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma";
+ reg = <0x21300 0x4>;
+ ranges = <0x0 0x21100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,mpc8548-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <20 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,mpc8548-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <21 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,mpc8548-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <22 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,mpc8548-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <23 2>;
+ };
+ };
+
+ enet0: ethernet@24000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <0>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x24000 0x1000>;
+ ranges = <0x0 0x24000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
+ interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
+ phy-handle = <&phy0>;
+
+ mdio@520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-mdio";
+ reg = <0x520 0x20>;
+
+ phy0: ethernet-phy@19 {
+ interrupt-parent = <&mpic>;
+ interrupts = <0x6 0x1>;
+ reg = <0x19>;
+ device_type = "ethernet-phy";
+ };
+ phy1: ethernet-phy@1a {
+ interrupt-parent = <&mpic>;
+ interrupts = <0x7 0x1>;
+ reg = <0x1a>;
+ device_type = "ethernet-phy";
+ };
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+ };
+
+ enet1: ethernet@25000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x25000 0x1000>;
+ ranges = <0x0 0x25000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
+ interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
+ phy-handle = <&phy1>;
+
+ mdio@520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+ };
+
+ serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "fsl,ns16550", "ns16550";
+ reg = <0x4500 0x100>; // reg base, size
+ clock-frequency = <0>; // should we fill in in uboot?
+ interrupts = <0x2a 0x2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial1: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "fsl,ns16550", "ns16550";
+ reg = <0x4600 0x100>; // reg base, size
+ clock-frequency = <0>; // should we fill in in uboot?
+ interrupts = <0x2a 0x2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ global-utilities@e0000 { //global utilities reg
+ compatible = "fsl,mpc8548-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+
+ crypto@30000 {
+ compatible = "fsl,sec2.1", "fsl,sec2.0";
+ reg = <0x30000 0x10000>;
+ interrupts = <45 2>;
+ interrupt-parent = <&mpic>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0xfe>;
+ fsl,descriptor-types-mask = <0x12b0ebf>;
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ };
+ };
+
+ pci0: pci@e0008000 {
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x01 (PCI-X slot) @66MHz */
+ 0x0800 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x0800 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x0800 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x0800 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 (PCI, 3.3V 32bit) @33MHz */
+ 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1>;
+
+ interrupt-parent = <&mpic>;
+ interrupts = <0x18 0x2>;
+ bus-range = <0 0>;
+ ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x10000000
+ 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00800000>;
+ clock-frequency = <66000000>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xe0008000 0x1000>;
+ compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
+ device_type = "pci";
+ };
+
+ pci1: pcie@e000a000 {
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+
+ /* IDSEL 0x0 (PEX) */
+ 0x0000 0x0 0x0 0x1 &mpic 0x0 0x1
+ 0x0000 0x0 0x0 0x2 &mpic 0x1 0x1
+ 0x0000 0x0 0x0 0x3 &mpic 0x2 0x1
+ 0x0000 0x0 0x0 0x4 &mpic 0x3 0x1>;
+
+ interrupt-parent = <&mpic>;
+ interrupts = <0x1a 0x2>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
+ 0x01000000 0x0 0x00000000 0xe2800000 0x0 0x08000000>;
+ clock-frequency = <33000000>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xe000a000 0x1000>;
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x02000000 0x0 0xa0000000
+ 0x02000000 0x0 0xa0000000
+ 0x0 0x10000000
+
+ 0x01000000 0x0 0x00000000
+ 0x01000000 0x0 0x00000000
+ 0x0 0x00800000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/sbc8548-pre.dtsi b/arch/powerpc/boot/dts/sbc8548-pre.dtsi
new file mode 100644
index 0000000..d8c6629
--- /dev/null
+++ b/arch/powerpc/boot/dts/sbc8548-pre.dtsi
@@ -0,0 +1,52 @@
+/*
+ * SBC8548 Device Tree Source
+ *
+ * Copyright 2007 Wind River Systems Inc.
+ *
+ * Paul Gortmaker (see MAINTAINERS for contact information)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/{
+ model = "SBC8548";
+ compatible = "SBC8548";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aliases {
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ serial0 = &serial0;
+ serial1 = &serial1;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8548@0 {
+ device_type = "cpu";
+ reg = <0>;
+ d-cache-line-size = <0x20>; // 32 bytes
+ i-cache-line-size = <0x20>; // 32 bytes
+ d-cache-size = <0x8000>; // L1, 32K
+ i-cache-size = <0x8000>; // L1, 32K
+ timebase-frequency = <0>; // From uboot
+ bus-frequency = <0>;
+ clock-frequency = <0>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+};
diff --git a/arch/powerpc/boot/dts/sbc8548.dts b/arch/powerpc/boot/dts/sbc8548.dts
index 77be771..1df2a09 100644
--- a/arch/powerpc/boot/dts/sbc8548.dts
+++ b/arch/powerpc/boot/dts/sbc8548.dts
@@ -14,44 +14,9 @@
/dts-v1/;
-/ {
- model = "SBC8548";
- compatible = "SBC8548";
- #address-cells = <1>;
- #size-cells = <1>;
-
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- pci1 = &pci1;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,8548@0 {
- device_type = "cpu";
- reg = <0>;
- d-cache-line-size = <0x20>; // 32 bytes
- i-cache-line-size = <0x20>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <0>; // From uboot
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
- };
- };
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x10000000>;
- };
+/include/ "sbc8548-pre.dtsi"
+/{
localbus@e0000000 {
#address-cells = <2>;
#size-cells = <1>;
@@ -63,23 +28,25 @@
0x3 0x0 0xf0000000 0x04000000 /*64MB SDRAM*/
0x4 0x0 0xf4000000 0x04000000 /*64MB SDRAM*/
0x5 0x0 0xf8000000 0x00b10000 /* EPLD */
- 0x6 0x0 0xfb800000 0x04000000>; /*64MB Flash*/
+ 0x6 0x0 0xec000000 0x04000000>; /*64MB Flash*/
flash@0,0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "cfi-flash";
+ compatible = "intel,JS28F640", "cfi-flash";
reg = <0x0 0x0 0x800000>;
bank-width = <1>;
device-width = <1>;
partition@0x0 {
label = "space";
- reg = <0x00000000 0x00100000>;
+ /* FF800000 -> FFF9FFFF */
+ reg = <0x00000000 0x007a0000>;
};
- partition@0x100000 {
+ partition@0x7a0000 {
label = "bootloader";
- reg = <0x00100000 0x00700000>;
+ /* FFFA0000 -> FFFFFFFF */
+ reg = <0x007a0000 0x00060000>;
read-only;
};
};
@@ -122,307 +89,22 @@
#address-cells = <1>;
#size-cells = <1>;
reg = <0x6 0x0 0x04000000>;
- compatible = "cfi-flash";
+ compatible = "intel,JS28F128", "cfi-flash";
bank-width = <4>;
device-width = <1>;
partition@0x0 {
+ label = "space";
+ /* EC000000 -> EFEFFFFF */
+ reg = <0x00000000 0x03f00000>;
+ };
+ partition@0x03f00000 {
label = "bootloader";
- reg = <0x00000000 0x00100000>;
+ /* EFF00000 -> EFFFFFFF */
+ reg = <0x03f00000 0x00100000>;
read-only;
};
- partition@0x00100000 {
- label = "file-system";
- reg = <0x00100000 0x01f00000>;
- };
- partition@0x02000000 {
- label = "boot-config";
- reg = <0x02000000 0x00100000>;
- };
- partition@0x02100000 {
- label = "space";
- reg = <0x02100000 0x01f00000>;
- };
};
};
-
- soc8548@e0000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- ranges = <0x00000000 0xe0000000 0x00100000>;
- bus-frequency = <0>;
- compatible = "simple-bus";
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <10>;
- };
-
- ecm@1000 {
- compatible = "fsl,mpc8548-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,mpc8548-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <0x12 0x2>;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8548-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <0x20>; // 32 bytes
- cache-size = <0x80000>; // L2, 512K
- interrupt-parent = <&mpic>;
- interrupts = <0x10 0x2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <0x2b 0x2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <0x2b 0x2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi0>;
- phy-handle = <&phy0>;
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@19 {
- interrupt-parent = <&mpic>;
- interrupts = <0x6 0x1>;
- reg = <0x19>;
- device_type = "ethernet-phy";
- };
- phy1: ethernet-phy@1a {
- interrupt-parent = <&mpic>;
- interrupts = <0x7 0x1>;
- reg = <0x1a>;
- device_type = "ethernet-phy";
- };
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet1: ethernet@25000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- ranges = <0x0 0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi1>;
- phy-handle = <&phy1>;
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "fsl,ns16550", "ns16550";
- reg = <0x4500 0x100>; // reg base, size
- clock-frequency = <0>; // should we fill in in uboot?
- interrupts = <0x2a 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "fsl,ns16550", "ns16550";
- reg = <0x4600 0x100>; // reg base, size
- clock-frequency = <0>; // should we fill in in uboot?
- interrupts = <0x2a 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities reg
- compatible = "fsl,mpc8548-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
-
- crypto@30000 {
- compatible = "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xfe>;
- fsl,descriptor-types-mask = <0x12b0ebf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
- };
-
- pci0: pci@e0008000 {
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x01 (PCI-X slot) @66MHz */
- 0x0800 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x0800 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x0800 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x0800 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 (PCI, 3.3V 32bit) @33MHz */
- 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <0x18 0x2>;
- bus-range = <0 0>;
- ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x10000000
- 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00800000>;
- clock-frequency = <66000000>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe0008000 0x1000>;
- compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
- device_type = "pci";
- };
-
- pci1: pcie@e000a000 {
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
-
- /* IDSEL 0x0 (PEX) */
- 0x0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0x0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0x0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0x0000 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <0x1a 0x2>;
- bus-range = <0x0 0xff>;
- ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
- 0x01000000 0x0 0x00000000 0xe2800000 0x0 0x08000000>;
- clock-frequency = <33000000>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe000a000 0x1000>;
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- ranges = <0x02000000 0x0 0xa0000000
- 0x02000000 0x0 0xa0000000
- 0x0 0x10000000
-
- 0x01000000 0x0 0x00000000
- 0x01000000 0x0 0x00000000
- 0x0 0x00800000>;
- };
- };
};
+
+/include/ "sbc8548-post.dtsi"
diff --git a/arch/powerpc/boot/dts/t4240qds.dts b/arch/powerpc/boot/dts/t4240qds.dts
index 4eff41c..1781d02 100644
--- a/arch/powerpc/boot/dts/t4240qds.dts
+++ b/arch/powerpc/boot/dts/t4240qds.dts
@@ -6,13 +6,13 @@
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
@@ -20,7 +20,7 @@
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
diff --git a/arch/powerpc/boot/dts/uc101.dts b/arch/powerpc/boot/dts/uc101.dts
index ba83d54..5c46219 100644
--- a/arch/powerpc/boot/dts/uc101.dts
+++ b/arch/powerpc/boot/dts/uc101.dts
@@ -13,54 +13,20 @@
/include/ "mpc5200b.dtsi"
+&gpt0 { gpio-controller; };
+&gpt1 { gpio-controller; };
+&gpt2 { gpio-controller; };
+&gpt3 { gpio-controller; };
+&gpt4 { gpio-controller; };
+&gpt5 { gpio-controller; };
+&gpt6 { gpio-controller; };
+&gpt7 { gpio-controller; };
+
/ {
model = "manroland,uc101";
compatible = "manroland,uc101";
soc5200@f0000000 {
- gpt0: timer@600 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt1: timer@610 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt2: timer@620 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt3: timer@630 { // General Purpose Timer in GPIO mode
- compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
- reg = <0x630 0x10>;
- interrupts = <1 12 0>;
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt4: timer@640 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt5: timer@650 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt6: timer@660 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
- gpt7: timer@670 { // General Purpose Timer in GPIO mode
- gpio-controller;
- #gpio-cells = <2>;
- };
-
rtc@800 {
status = "disabled";
};
diff --git a/arch/powerpc/boot/dts/virtex440-ml507.dts b/arch/powerpc/boot/dts/virtex440-ml507.dts
index 52d8c1a..fc7073b 100644
--- a/arch/powerpc/boot/dts/virtex440-ml507.dts
+++ b/arch/powerpc/boot/dts/virtex440-ml507.dts
@@ -272,6 +272,12 @@
xlnx,temac-type = <0>;
xlnx,txcsum = <1>;
xlnx,txfifo = <0x1000>;
+ phy-handle = <&phy7>;
+ clock-frequency = <100000000>;
+ phy7: phy@7 {
+ compatible = "marvell,88e1111";
+ reg = <7>;
+ } ;
} ;
} ;
IIC_EEPROM: i2c@81600000 {
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index a0dfef1..e12e60c 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -2,6 +2,8 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
CONFIG_SLAB=y
@@ -16,8 +18,6 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_KMETER1=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
@@ -45,7 +45,6 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_PHRAM=y
CONFIG_MTD_UBI=y
CONFIG_MTD_UBI_GLUEBI=y
-CONFIG_MTD_UBI_DEBUG=y
CONFIG_PROC_DEVICETREE=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
@@ -76,5 +75,4 @@ CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
diff --git a/arch/powerpc/configs/85xx/ppa8548_defconfig b/arch/powerpc/configs/85xx/ppa8548_defconfig
new file mode 100644
index 0000000..a11337d
--- /dev/null
+++ b/arch/powerpc/configs/85xx/ppa8548_defconfig
@@ -0,0 +1,65 @@
+CONFIG_PPC_85xx=y
+CONFIG_PPA8548=y
+CONFIG_DTC=y
+CONFIG_DEFAULT_UIMAGE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+# CONFIG_PCI is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_ADVANCED_OPTIONS=y
+CONFIG_LOWMEM_SIZE_BOOL=y
+CONFIG_LOWMEM_SIZE=0x40000000
+CONFIG_LOWMEM_CAM_NUM_BOOL=y
+CONFIG_LOWMEM_CAM_NUM=4
+CONFIG_PAGE_OFFSET_BOOL=y
+CONFIG_PAGE_OFFSET=0xb0000000
+CONFIG_KERNEL_START_BOOL=y
+CONFIG_KERNEL_START=0xb0000000
+# CONFIG_PHYSICAL_START_BOOL is not set
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_PHYSICAL_ALIGN=0x04000000
+CONFIG_TASK_SIZE_BOOL=y
+CONFIG_TASK_SIZE=0xb0000000
+
+CONFIG_FSL_LBC=y
+CONFIG_FSL_DMA=y
+CONFIG_FSL_RIO=y
+
+CONFIG_RAPIDIO=y
+CONFIG_RAPIDIO_DMA_ENGINE=y
+CONFIG_RAPIDIO_TSI57X=y
+CONFIG_RAPIDIO_TSI568=y
+CONFIG_RAPIDIO_CPS_XX=y
+CONFIG_RAPIDIO_CPS_GEN2=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_PROC_DEVICETREE=y
+
+CONFIG_MTD=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_PHYSMAP_OF=y
+
+CONFIG_I2C=y
+CONFIG_I2C_MPC=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_DRV_ISL1208=y
+
+CONFIG_NET=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_GIANFAR=y
+CONFIG_MARVELL_PHY=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig
index 5b2b651..008a7a4 100644
--- a/arch/powerpc/configs/85xx/sbc8548_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8548_defconfig
@@ -55,3 +55,22 @@ CONFIG_ROOT_NFS=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_NOSWAP=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI_I4=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_PHYSMAP_OF=y
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
index 29bb11e..4f35fc4 100644
--- a/arch/powerpc/configs/chroma_defconfig
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -1,6 +1,6 @@
CONFIG_PPC64=y
CONFIG_PPC_BOOK3E_64=y
-# CONFIG_VIRT_CPU_ACCOUNTING is not set
+# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=256
CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 255914d..60f3417 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -33,7 +33,6 @@ CONFIG_HIGHMEM=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_KEXEC=y
-CONFIG_IRQ_ALL_CPUS=y
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 1aee931..5f2f6b8 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -3,7 +3,6 @@ CONFIG_PPC_BOOK3E_64=y
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=24
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IRQ_DOMAIN_DEBUG=y
@@ -26,6 +25,7 @@ CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
+CONFIG_B4_QDS=y
CONFIG_P5020_DS=y
CONFIG_P5040_DS=y
CONFIG_PPC_QEMU_E500=y
@@ -33,7 +33,7 @@ CONFIG_T4240_QDS=y
CONFIG_B4_QDS=y
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_BINFMT_MISC=m
-CONFIG_IRQ_ALL_CPUS=y
+CONFIG_FSL_IFC=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_MSI=y
CONFIG_NET=y
@@ -71,19 +71,33 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
CONFIG_FTL=y
CONFIG_MTD_CFI=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_M25P80=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_NAND_ECC=y
CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
@@ -171,9 +185,16 @@ CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
+CONFIG_MISC_FILESYSTEMS=y
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=1
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_XATTR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
@@ -182,6 +203,12 @@ CONFIG_NFSD=m
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
CONFIG_CRC_T10DIF=y
+CONFIG_CRC16=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index 211fcc9..0d0d981 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -13,7 +13,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_PPC_CHRP is not set
CONFIG_PPC_MPC512x=y
CONFIG_MPC5121_ADS=y
-CONFIG_MPC5121_GENERIC=y
+CONFIG_MPC512x_GENERIC=y
CONFIG_PDM360NG=y
# CONFIG_PPC_PMAC is not set
CONFIG_NO_HZ=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 2431b8b..f49cd1b 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -1,6 +1,7 @@
CONFIG_PPC_85xx=y
CONFIG_PHYS_64BIT=y
CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
@@ -48,6 +49,7 @@ CONFIG_HIGHMEM=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
CONFIG_FORCE_MAX_ZONEORDER=12
+CONFIG_FSL_IFC=y
CONFIG_PCI=y
CONFIG_PCI_MSI=y
CONFIG_RAPIDIO=y
@@ -80,18 +82,33 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
CONFIG_FTL=y
CONFIG_MTD_CFI=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_NAND_ECC=y
CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
@@ -139,6 +156,9 @@ CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_QE=m
CONFIG_NVRAM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_CPM=m
+CONFIG_I2C_MPC=y
CONFIG_SPI=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
@@ -215,7 +235,16 @@ CONFIG_HUGETLBFS=y
CONFIG_HFSPLUS_FS=m
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=1
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_XATTR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+CONFIG_ADFS_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
CONFIG_CRAMFS=y
CONFIG_VXFS_FS=m
CONFIG_HPFS_FS=m
@@ -227,6 +256,12 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_CRC_T10DIF=y
+CONFIG_CRC16=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index e3495bc..f409b5f 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -53,8 +53,8 @@ CONFIG_QE_GPIO=y
CONFIG_HIGHMEM=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
-CONFIG_IRQ_ALL_CPUS=y
CONFIG_FORCE_MAX_ZONEORDER=12
+CONFIG_FSL_IFC=y
CONFIG_PCI=y
CONFIG_PCI_MSI=y
CONFIG_RAPIDIO=y
@@ -87,19 +87,33 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
CONFIG_FTL=y
CONFIG_MTD_CFI=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_M25P80=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_NAND_ECC=y
CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
@@ -224,6 +238,15 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=1
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_XATTR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
CONFIG_ADFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_HFS_FS=m
@@ -245,6 +268,12 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_CRC_T10DIF=y
+CONFIG_CRC16=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 840a2c2..bd8a6f7 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -1,28 +1,27 @@
CONFIG_PPC64=y
CONFIG_ALTIVEC=y
-# CONFIG_VIRT_CPU_ACCOUNTING is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_PASEMI=y
CONFIG_PPC_PASEMI_IOMMU=y
CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_DEBUG=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_1000=y
CONFIG_PPC_64K_PAGES=y
# CONFIG_SECCOMP is not set
@@ -47,7 +46,6 @@ CONFIG_INET_ESP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_SLRAM=y
@@ -58,7 +56,6 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
@@ -91,21 +88,19 @@ CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
-CONFIG_NET_PCI=y
-CONFIG_E1000=y
CONFIG_TIGON3=y
+CONFIG_E1000=y
CONFIG_PASEMI_MAC=y
+CONFIG_MARVELL_PHY=y
CONFIG_INPUT_JOYDEV=y
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_MOUSE_PS2 is not set
# CONFIG_SERIO is not set
+CONFIG_LEGACY_PTY_COUNT=4
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=4
CONFIG_HW_RANDOM=y
CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
@@ -146,14 +141,11 @@ CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_ZEROPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_SL811_HCD=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_PASEMI=y
@@ -164,8 +156,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=y
@@ -177,27 +167,22 @@ CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V4=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=y
+CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_BLOWFISH=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 6d03530..aef3f71 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -5,6 +5,9 @@ CONFIG_SMP=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_IKCONFIG=y
@@ -21,6 +24,8 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
CONFIG_PPC_SPLPAR=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
@@ -42,10 +47,9 @@ CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_PMAC64=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
+CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_HOTPLUG_CPU=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
@@ -73,7 +77,6 @@ CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_SCTP=m
@@ -130,19 +133,12 @@ CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -151,7 +147,10 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_BPF_JIT=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=y
@@ -173,7 +172,6 @@ CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
-CONFIG_SCSI_SAS_ATTRS=m
CONFIG_SCSI_CXGB3_ISCSI=m
CONFIG_SCSI_CXGB4_ISCSI=m
CONFIG_SCSI_BNX2_ISCSI=m
@@ -205,13 +203,6 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
-CONFIG_IEEE1394=y
-CONFIG_IEEE1394_OHCI1394=y
-CONFIG_IEEE1394_SBP2=m
-CONFIG_IEEE1394_ETH1394=m
-CONFIG_IEEE1394_RAWIO=y
-CONFIG_IEEE1394_VIDEO1394=m
-CONFIG_IEEE1394_DV1394=m
CONFIG_ADB_PMU=y
CONFIG_PMAC_SMU=y
CONFIG_THERM_PM72=y
@@ -220,50 +211,43 @@ CONFIG_WINDFARM_PM81=y
CONFIG_WINDFARM_PM91=y
CONFIG_WINDFARM_PM112=y
CONFIG_WINDFARM_PM121=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=m
-CONFIG_MARVELL_PHY=y
-CONFIG_BROADCOM_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_SUNGEM=y
-CONFIG_NET_VENDOR_3COM=y
CONFIG_VORTEX=y
-CONFIG_IBMVETH=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-CONFIG_E100=y
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_E1000=y
-CONFIG_E1000E=y
+CONFIG_PCNET32=y
CONFIG_TIGON3=y
-CONFIG_BNX2=m
-CONFIG_SPIDER_NET=m
-CONFIG_GELIC_NET=m
-CONFIG_GELIC_WIRELESS=y
CONFIG_CHELSIO_T1=m
-CONFIG_CHELSIO_T3=m
-CONFIG_CHELSIO_T4=m
+CONFIG_BE2NET=m
+CONFIG_S2IO=m
+CONFIG_IBMVETH=m
CONFIG_EHEA=m
-CONFIG_IXGBE=m
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
CONFIG_IXGB=m
-CONFIG_S2IO=m
+CONFIG_IXGBE=m
+CONFIG_MLX4_EN=m
CONFIG_MYRI10GE=m
-CONFIG_NETXEN_NIC=m
CONFIG_PASEMI_MAC=y
-CONFIG_MLX4_EN=m
CONFIG_QLGE=m
-CONFIG_BE2NET=m
+CONFIG_NETXEN_NIC=m
+CONFIG_SUNGEM=y
+CONFIG_GELIC_NET=m
+CONFIG_GELIC_WIRELESS=y
+CONFIG_SPIDER_NET=m
+CONFIG_MARVELL_PHY=y
+CONFIG_BROADCOM_PHY=m
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
-CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_MISC=y
@@ -279,13 +263,10 @@ CONFIG_HVC_RTAS=y
CONFIG_HVC_BEAT=y
CONFIG_HVCS=m
CONFIG_IBM_BSR=m
-CONFIG_HW_RANDOM=m
-CONFIG_HW_RANDOM_PSERIES=m
CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_AMD8111=y
CONFIG_I2C_PASEMI=y
-# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
@@ -300,7 +281,6 @@ CONFIG_FB_RADEON=y
CONFIG_FB_IBM_GXT4500=y
CONFIG_FB_PS3=m
CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_DISPLAY_SUPPORT=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -317,18 +297,16 @@ CONFIG_SND_AOA_FABRIC_LAYOUT=m
CONFIG_SND_AOA_ONYX=m
CONFIG_SND_AOA_TAS=m
CONFIG_SND_AOA_TOONIE=m
-CONFIG_USB_HIDDEV=y
CONFIG_HID_GYRATION=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
+CONFIG_USB_HIDDEV=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_TT_NEWSCHED=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=m
@@ -370,11 +348,9 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
-CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
-CONFIG_INOTIFY=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=y
@@ -383,100 +359,53 @@ CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_XATTR=y
-CONFIG_SQUASHFS_ZLIB=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
CONFIG_CIFS=m
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
-CONFIG_PARTITION_ADVANCED=y
+CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
+CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_LOCKUP_DETECTOR=y
-CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_STACK_USAGE=y
CONFIG_LATENCYTOP=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
CONFIG_XMON=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_BOOTX_TEXT=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
@@ -486,11 +415,9 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_HW=y
CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=y
CONFIG_VHOST_NET=m
-CONFIG_BPF_JIT=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index f55c276..4b20f76 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -4,6 +4,8 @@ CONFIG_SMP=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_IKCONFIG=y
@@ -18,12 +20,13 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
+CONFIG_EFI_PARTITION=y
CONFIG_P5020_DS=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_IRQ_ALL_CPUS=y
CONFIG_SPARSEMEM_MANUAL=y
@@ -46,7 +49,6 @@ CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_SCTP=m
@@ -103,19 +105,12 @@ CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -125,6 +120,8 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=y
@@ -167,41 +164,31 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
-CONFIG_IEEE1394=y
-CONFIG_IEEE1394_OHCI1394=y
-CONFIG_IEEE1394_SBP2=m
-CONFIG_IEEE1394_ETH1394=m
-CONFIG_IEEE1394_RAWIO=y
-CONFIG_IEEE1394_VIDEO1394=m
-CONFIG_IEEE1394_DV1394=m
CONFIG_MACINTOSH_DRIVERS=y
CONFIG_WINDFARM=y
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=m
-CONFIG_MARVELL_PHY=y
-CONFIG_BROADCOM_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_SUNGEM=y
-CONFIG_NET_VENDOR_3COM=y
CONFIG_VORTEX=y
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-CONFIG_E100=y
CONFIG_ACENIC=y
CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_E1000=y
+CONFIG_PCNET32=y
CONFIG_TIGON3=y
+CONFIG_E100=y
+CONFIG_E1000=y
CONFIG_IXGB=m
+CONFIG_SUNGEM=y
+CONFIG_MARVELL_PHY=y
+CONFIG_BROADCOM_PHY=m
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
-CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_MISC=y
@@ -213,7 +200,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_AMD8111=y
-# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
@@ -227,7 +213,6 @@ CONFIG_FB_MATROX_MAVEN=m
CONFIG_FB_RADEON=y
CONFIG_FB_IBM_GXT4500=y
CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_DISPLAY_SUPPORT=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -238,7 +223,6 @@ CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_USB_HIDDEV=y
CONFIG_HID_DRAGONRISE=y
CONFIG_HID_GYRATION=y
CONFIG_HID_TWINHAN=y
@@ -253,8 +237,8 @@ CONFIG_HID_SMARTJOYPLUS=y
CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_ZEROPLUS=y
+CONFIG_USB_HIDDEV=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=y
@@ -296,73 +280,36 @@ CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
CONFIG_CIFS=m
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
+CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
+CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_STACK_USAGE=y
CONFIG_LATENCYTOP=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
@@ -371,16 +318,12 @@ CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index c2f4b4a..139a830 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -3,10 +3,11 @@ CONFIG_TUNE_CELL=y
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_LZMA=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_PERF_EVENTS is not set
@@ -24,12 +25,13 @@ CONFIG_PS3_DISK=y
CONFIG_PS3_ROM=y
CONFIG_PS3_FLASH=y
CONFIG_PS3_VRAM=m
+CONFIG_PS3_LPM=m
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
-CONFIG_HIGH_RES_TIMERS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
CONFIG_KEXEC=y
# CONFIG_SPARSEMEM_VMEMMAP is not set
+# CONFIG_COMPACTION is not set
CONFIG_SCHED_SMT=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
@@ -59,6 +61,7 @@ CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
CONFIG_BT_HCIBTUSB=m
CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
CONFIG_MAC80211_RC_PID=y
# CONFIG_MAC80211_RC_MINSTREL is not set
@@ -78,7 +81,6 @@ CONFIG_MD=y
CONFIG_BLK_DEV_DM=m
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -119,22 +121,21 @@ CONFIG_SND=m
# CONFIG_SND_DRIVERS is not set
CONFIG_SND_USB_AUDIO=m
CONFIG_HIDRAW=y
-CONFIG_USB_HIDDEV=y
CONFIG_HID_APPLE=m
CONFIG_HID_BELKIN=m
CONFIG_HID_CHERRY=m
CONFIG_HID_EZKEY=m
CONFIG_HID_TWINHAN=m
CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
CONFIG_HID_MICROSOFT=m
+CONFIG_HID_PS3REMOTE=m
CONFIG_HID_SONY=m
CONFIG_HID_SUNPLUS=m
CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_USB_HIDDEV=y
CONFIG_USB=m
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_SUSPEND=y
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
@@ -158,8 +159,8 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
CONFIG_NLS=y
@@ -176,6 +177,7 @@ CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_WRITECOUNT=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_LIST=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_CRYPTO_CCM=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 5b8e1e5..c4dfbaf 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -6,12 +6,15 @@ CONFIG_NR_CPUS=2048
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
-CONFIG_AUDITSYSCALL=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -29,6 +32,8 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
CONFIG_PPC_SPLPAR=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
@@ -36,10 +41,9 @@ CONFIG_DTL=y
# CONFIG_PPC_PMAC is not set
CONFIG_RTAS_FLASH=m
CONFIG_IBMEBUS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
+CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_HOTPLUG_CPU=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
@@ -65,7 +69,6 @@ CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_UDPLITE=m
@@ -112,20 +115,15 @@ CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_PROC_DEVICETREE=y
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
@@ -146,7 +144,6 @@ CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
-CONFIG_SCSI_SAS_ATTRS=m
CONFIG_SCSI_CXGB3_ISCSI=m
CONFIG_SCSI_CXGB4_ISCSI=m
CONFIG_SCSI_BNX2_ISCSI=m
@@ -177,43 +174,36 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL_TRAP=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_VENDOR_3COM=y
CONFIG_VORTEX=y
-CONFIG_IBMVETH=y
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-CONFIG_E100=y
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_E1000=y
-CONFIG_E1000E=y
+CONFIG_PCNET32=y
CONFIG_TIGON3=y
-CONFIG_BNX2=m
CONFIG_CHELSIO_T1=m
-CONFIG_CHELSIO_T3=m
-CONFIG_CHELSIO_T4=m
+CONFIG_BE2NET=m
+CONFIG_S2IO=m
+CONFIG_IBMVETH=y
CONFIG_EHEA=y
-CONFIG_IXGBE=m
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
CONFIG_IXGB=m
-CONFIG_S2IO=m
-CONFIG_MYRI10GE=m
-CONFIG_NETXEN_NIC=m
+CONFIG_IXGBE=m
CONFIG_MLX4_EN=m
+CONFIG_MYRI10GE=m
CONFIG_QLGE=m
-CONFIG_BE2NET=m
+CONFIG_NETXEN_NIC=m
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
-CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_MISC=y
@@ -227,12 +217,9 @@ CONFIG_HVC_CONSOLE=y
CONFIG_HVC_RTAS=y
CONFIG_HVCS=m
CONFIG_IBM_BSR=m
-CONFIG_HW_RANDOM=m
-CONFIG_HW_RANDOM_PSERIES=m
CONFIG_GEN_RTC=y
CONFIG_RAW_DRIVER=y
CONFIG_MAX_RAW_DEVS=1024
-# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_OF=y
@@ -243,19 +230,17 @@ CONFIG_FB_MATROX_G=y
CONFIG_FB_RADEON=y
CONFIG_FB_IBM_GXT4500=y
CONFIG_LCD_PLATFORM=m
-CONFIG_DISPLAY_SUPPORT=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
-CONFIG_USB_HIDDEV=y
CONFIG_HID_GYRATION=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
+CONFIG_USB_HIDDEV=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
@@ -293,7 +278,6 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
-CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
@@ -305,61 +289,49 @@ CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_XATTR=y
-CONFIG_SQUASHFS_ZLIB=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFSD=m
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
CONFIG_CIFS=m
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
+CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_LOCKUP_DETECTOR=y
-CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_STACK_USAGE=y
CONFIG_LATENCYTOP=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
@@ -369,7 +341,6 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_HW=y
CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
new file mode 100644
index 0000000..2926fb9
--- /dev/null
+++ b/arch/powerpc/crypto/Makefile
@@ -0,0 +1,9 @@
+#
+# powerpc/crypto/Makefile
+#
+# Arch-specific CryptoAPI modules.
+#
+
+obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
+
+sha1-powerpc-y := sha1-powerpc-asm.o sha1.o
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
new file mode 100644
index 0000000..125e165
--- /dev/null
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -0,0 +1,179 @@
+/*
+ * SHA-1 implementation for PowerPC.
+ *
+ * Copyright (C) 2005 Paul Mackerras <paulus@samba.org>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * We roll the registers for T, A, B, C, D, E around on each
+ * iteration; T on iteration t is A on iteration t+1, and so on.
+ * We use registers 7 - 12 for this.
+ */
+#define RT(t) ((((t)+5)%6)+7)
+#define RA(t) ((((t)+4)%6)+7)
+#define RB(t) ((((t)+3)%6)+7)
+#define RC(t) ((((t)+2)%6)+7)
+#define RD(t) ((((t)+1)%6)+7)
+#define RE(t) ((((t)+0)%6)+7)
+
+/* We use registers 16 - 31 for the W values */
+#define W(t) (((t)%16)+16)
+
+#define LOADW(t) \
+ lwz W(t),(t)*4(r4)
+
+#define STEPD0_LOAD(t) \
+ andc r0,RD(t),RB(t); \
+ and r6,RB(t),RC(t); \
+ rotlwi RT(t),RA(t),5; \
+ or r6,r6,r0; \
+ add r0,RE(t),r15; \
+ add RT(t),RT(t),r6; \
+ add r14,r0,W(t); \
+ lwz W((t)+4),((t)+4)*4(r4); \
+ rotlwi RB(t),RB(t),30; \
+ add RT(t),RT(t),r14
+
+#define STEPD0_UPDATE(t) \
+ and r6,RB(t),RC(t); \
+ andc r0,RD(t),RB(t); \
+ rotlwi RT(t),RA(t),5; \
+ rotlwi RB(t),RB(t),30; \
+ or r6,r6,r0; \
+ add r0,RE(t),r15; \
+ xor r5,W((t)+4-3),W((t)+4-8); \
+ add RT(t),RT(t),r6; \
+ xor W((t)+4),W((t)+4-16),W((t)+4-14); \
+ add r0,r0,W(t); \
+ xor W((t)+4),W((t)+4),r5; \
+ add RT(t),RT(t),r0; \
+ rotlwi W((t)+4),W((t)+4),1
+
+#define STEPD1(t) \
+ xor r6,RB(t),RC(t); \
+ rotlwi RT(t),RA(t),5; \
+ rotlwi RB(t),RB(t),30; \
+ xor r6,r6,RD(t); \
+ add r0,RE(t),r15; \
+ add RT(t),RT(t),r6; \
+ add r0,r0,W(t); \
+ add RT(t),RT(t),r0
+
+#define STEPD1_UPDATE(t) \
+ xor r6,RB(t),RC(t); \
+ rotlwi RT(t),RA(t),5; \
+ rotlwi RB(t),RB(t),30; \
+ xor r6,r6,RD(t); \
+ add r0,RE(t),r15; \
+ xor r5,W((t)+4-3),W((t)+4-8); \
+ add RT(t),RT(t),r6; \
+ xor W((t)+4),W((t)+4-16),W((t)+4-14); \
+ add r0,r0,W(t); \
+ xor W((t)+4),W((t)+4),r5; \
+ add RT(t),RT(t),r0; \
+ rotlwi W((t)+4),W((t)+4),1
+
+#define STEPD2_UPDATE(t) \
+ and r6,RB(t),RC(t); \
+ and r0,RB(t),RD(t); \
+ rotlwi RT(t),RA(t),5; \
+ or r6,r6,r0; \
+ rotlwi RB(t),RB(t),30; \
+ and r0,RC(t),RD(t); \
+ xor r5,W((t)+4-3),W((t)+4-8); \
+ or r6,r6,r0; \
+ xor W((t)+4),W((t)+4-16),W((t)+4-14); \
+ add r0,RE(t),r15; \
+ add RT(t),RT(t),r6; \
+ add r0,r0,W(t); \
+ xor W((t)+4),W((t)+4),r5; \
+ add RT(t),RT(t),r0; \
+ rotlwi W((t)+4),W((t)+4),1
+
+#define STEP0LD4(t) \
+ STEPD0_LOAD(t); \
+ STEPD0_LOAD((t)+1); \
+ STEPD0_LOAD((t)+2); \
+ STEPD0_LOAD((t)+3)
+
+#define STEPUP4(t, fn) \
+ STEP##fn##_UPDATE(t); \
+ STEP##fn##_UPDATE((t)+1); \
+ STEP##fn##_UPDATE((t)+2); \
+ STEP##fn##_UPDATE((t)+3)
+
+#define STEPUP20(t, fn) \
+ STEPUP4(t, fn); \
+ STEPUP4((t)+4, fn); \
+ STEPUP4((t)+8, fn); \
+ STEPUP4((t)+12, fn); \
+ STEPUP4((t)+16, fn)
+
+_GLOBAL(powerpc_sha_transform)
+ PPC_STLU r1,-INT_FRAME_SIZE(r1)
+ SAVE_8GPRS(14, r1)
+ SAVE_10GPRS(22, r1)
+
+ /* Load up A - E */
+ lwz RA(0),0(r3) /* A */
+ lwz RB(0),4(r3) /* B */
+ lwz RC(0),8(r3) /* C */
+ lwz RD(0),12(r3) /* D */
+ lwz RE(0),16(r3) /* E */
+
+ LOADW(0)
+ LOADW(1)
+ LOADW(2)
+ LOADW(3)
+
+ lis r15,0x5a82 /* K0-19 */
+ ori r15,r15,0x7999
+ STEP0LD4(0)
+ STEP0LD4(4)
+ STEP0LD4(8)
+ STEPUP4(12, D0)
+ STEPUP4(16, D0)
+
+ lis r15,0x6ed9 /* K20-39 */
+ ori r15,r15,0xeba1
+ STEPUP20(20, D1)
+
+ lis r15,0x8f1b /* K40-59 */
+ ori r15,r15,0xbcdc
+ STEPUP20(40, D2)
+
+ lis r15,0xca62 /* K60-79 */
+ ori r15,r15,0xc1d6
+ STEPUP4(60, D1)
+ STEPUP4(64, D1)
+ STEPUP4(68, D1)
+ STEPUP4(72, D1)
+ lwz r20,16(r3)
+ STEPD1(76)
+ lwz r19,12(r3)
+ STEPD1(77)
+ lwz r18,8(r3)
+ STEPD1(78)
+ lwz r17,4(r3)
+ STEPD1(79)
+
+ lwz r16,0(r3)
+ add r20,RE(80),r20
+ add RD(0),RD(80),r19
+ add RC(0),RC(80),r18
+ add RB(0),RB(80),r17
+ add RA(0),RA(80),r16
+ mr RE(0),r20
+ stw RA(0),0(r3)
+ stw RB(0),4(r3)
+ stw RC(0),8(r3)
+ stw RD(0),12(r3)
+ stw RE(0),16(r3)
+
+ REST_8GPRS(14, r1)
+ REST_10GPRS(22, r1)
+ addi r1,r1,INT_FRAME_SIZE
+ blr
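
Note on the RT()/RA()/.../RE() macros above: they rotate the six SHA-1 working values (T, A, B, C, D, E) across GPRs r7-r12 so no register-to-register moves are needed between rounds. The following standalone C sketch (illustration only, not part of the patch; the helper names are invented here) reproduces that mapping and shows that this round's result register RT(t) is next round's RA(t+1):

#include <stdio.h>

/* Same register-role mapping as in sha1-powerpc-asm.S above. */
#define RT(t) ((((t) + 5) % 6) + 7)
#define RA(t) ((((t) + 4) % 6) + 7)
#define RB(t) ((((t) + 3) % 6) + 7)
#define RC(t) ((((t) + 2) % 6) + 7)
#define RD(t) ((((t) + 1) % 6) + 7)
#define RE(t) ((((t) + 0) % 6) + 7)

int main(void)
{
	for (int t = 0; t < 6; t++)
		printf("t=%d  T=r%d A=r%d B=r%d C=r%d D=r%d E=r%d\n",
		       t, RT(t), RA(t), RB(t), RC(t), RD(t), RE(t));
	/* RT(t) == RA(t+1): the result register of one round becomes the
	 * A register of the next, which is the "rolling" described in the
	 * comment at the top of the assembly file. */
	return 0;
}
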
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
new file mode 100644
index 0000000..f9e8b94
--- /dev/null
+++ b/arch/powerpc/crypto/sha1.c
@@ -0,0 +1,157 @@
+/*
+ * Cryptographic API.
+ *
+ * powerpc implementation of the SHA1 Secure Hash Algorithm.
+ *
+ * Derived from cryptoapi implementation, adapted for in-place
+ * scatterlist interface.
+ *
+ * Derived from "crypto/sha1.c"
+ * Copyright (c) Alan Smithee.
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <crypto/sha.h>
+#include <asm/byteorder.h>
+
+extern void powerpc_sha_transform(u32 *state, const u8 *src, u32 *temp);
+
+static int sha1_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ *sctx = (struct sha1_state){
+ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+ };
+
+ return 0;
+}
+
+static int sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial, done;
+ const u8 *src;
+
+ partial = sctx->count & 0x3f;
+ sctx->count += len;
+ done = 0;
+ src = data;
+
+ if ((partial + len) > 63) {
+ u32 temp[SHA_WORKSPACE_WORDS];
+
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buffer + partial, data, done + 64);
+ src = sctx->buffer;
+ }
+
+ do {
+ powerpc_sha_transform(sctx->state, src, temp);
+ done += 64;
+ src = data + done;
+ } while (done + 63 < len);
+
+ memset(temp, 0, sizeof(temp));
+ partial = 0;
+ }
+ memcpy(sctx->buffer + partial, src, len - done);
+
+ return 0;
+}
+
+
+/* Add padding and return the message digest. */
+static int sha1_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be32 *dst = (__be32 *)out;
+ u32 i, index, padlen;
+ __be64 bits;
+ static const u8 padding[64] = { 0x80, };
+
+ bits = cpu_to_be64(sctx->count << 3);
+
+ /* Pad out to 56 mod 64 */
+ index = sctx->count & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+ sha1_update(desc, padding, padlen);
+
+ /* Append length */
+ sha1_update(desc, (const u8 *)&bits, sizeof(bits));
+
+ /* Store state in digest */
+ for (i = 0; i < 5; i++)
+ dst[i] = cpu_to_be32(sctx->state[i]);
+
+ /* Wipe context */
+ memset(sctx, 0, sizeof *sctx);
+
+ return 0;
+}
+
+static int sha1_export(struct shash_desc *desc, void *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
+static struct shash_alg alg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = sha1_init,
+ .update = sha1_update,
+ .final = sha1_final,
+ .export = sha1_export,
+ .import = sha1_import,
+ .descsize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name= "sha1-powerpc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sha1_powerpc_mod_init(void)
+{
+ return crypto_register_shash(&alg);
+}
+
+static void __exit sha1_powerpc_mod_fini(void)
+{
+ crypto_unregister_shash(&alg);
+}
+
+module_init(sha1_powerpc_mod_init);
+module_exit(sha1_powerpc_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
+
+MODULE_ALIAS("sha1-powerpc");
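
The padding logic in sha1_final() above extends the message so its length is congruent to 56 mod 64, leaving 8 bytes for the big-endian bit count. A minimal standalone sketch of that padlen computation (the sha1_padlen() helper is invented here for illustration and is not part of the patch), with a few worked values:

#include <stdio.h>

static unsigned int sha1_padlen(unsigned long long count)
{
	/* Bytes already sitting in the final 64-byte block. */
	unsigned int index = count & 0x3f;

	/* Same expression as in sha1_final() above. */
	return (index < 56) ? (56 - index) : ((64 + 56) - index);
}

int main(void)
{
	/* 0 bytes -> 56 pad bytes, 55 -> 1, 56 -> 64, 63 -> 57 */
	printf("%u %u %u %u\n",
	       sha1_padlen(0), sha1_padlen(55),
	       sha1_padlen(56), sha1_padlen(63));
	return 0;
}
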
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index ef918a2..910194e 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -52,10 +52,8 @@
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
-#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-
/* Macro for generating the ***_bits() functions */
-#define DEFINE_BITOP(fn, op, prefix, postfix) \
+#define DEFINE_BITOP(fn, op, prefix) \
static __inline__ void fn(unsigned long mask, \
volatile unsigned long *_p) \
{ \
@@ -68,16 +66,15 @@ static __inline__ void fn(unsigned long mask, \
PPC405_ERR77(0,%3) \
PPC_STLCX "%0,0,%3\n" \
"bne- 1b\n" \
- postfix \
: "=&r" (old), "+m" (*p) \
: "r" (mask), "r" (p) \
: "cc", "memory"); \
}
-DEFINE_BITOP(set_bits, or, "", "")
-DEFINE_BITOP(clear_bits, andc, "", "")
-DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER, "")
-DEFINE_BITOP(change_bits, xor, "", "")
+DEFINE_BITOP(set_bits, or, "")
+DEFINE_BITOP(clear_bits, andc, "")
+DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
+DEFINE_BITOP(change_bits, xor, "")
static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
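
The bitops.h hunk above drops the now-unused postfix argument from DEFINE_BITOP(), which stamps out set_bits()/clear_bits()/clear_bits_unlock()/change_bits() from one template by substituting the operation (or/andc/xor) and an optional barrier prefix. A simplified, non-atomic analogue of that macro technique is sketched below (illustration only; the names are invented here, and the real kernel versions use lwarx/stwcx. loops and memory barriers):

#include <stdio.h>

/* Generate a family of bit-manipulation helpers from one template,
 * parameterised by the expression applied to the word. */
#define DEFINE_SIMPLE_BITOP(fn, expr)					\
static inline void fn(unsigned long mask, unsigned long *p)		\
{									\
	*p = (expr);							\
}

DEFINE_SIMPLE_BITOP(simple_set_bits,    *p | mask)
DEFINE_SIMPLE_BITOP(simple_clear_bits,  *p & ~mask)
DEFINE_SIMPLE_BITOP(simple_change_bits, *p ^ mask)

int main(void)
{
	unsigned long word = 0;

	simple_set_bits(0xf0UL, &word);		/* word == 0xf0 */
	simple_clear_bits(0x30UL, &word);	/* word == 0xc0 */
	simple_change_bits(0xffUL, &word);	/* word == 0x3f */
	printf("%lx\n", word);
	return 0;
}
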
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
new file mode 100644
index 0000000..b6f5a33
--- /dev/null
+++ b/arch/powerpc/include/asm/context_tracking.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
+#define _ASM_POWERPC_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+#define SCHEDULE_USER bl .schedule_user
+#else
+#define SCHEDULE_USER bl .schedule
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index e354127..8f57a4b 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -59,6 +59,7 @@ struct cpu_spec {
char *cpu_name;
unsigned long cpu_features; /* Kernel features */
unsigned int cpu_user_features; /* Userland features */
+ unsigned int cpu_user_features2; /* Userland features v2 */
unsigned int mmu_features; /* MMU features */
/* cache line sizes */
@@ -116,37 +117,37 @@ extern const char *powerpc_base_platform;
/* CPU kernel features */
/* Retain the 32b definitions all use bottom half of word */
-#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000000000000001)
-#define CPU_FTR_L2CR ASM_CONST(0x0000000000000002)
-#define CPU_FTR_SPEC7450 ASM_CONST(0x0000000000000004)
-#define CPU_FTR_ALTIVEC ASM_CONST(0x0000000000000008)
-#define CPU_FTR_TAU ASM_CONST(0x0000000000000010)
-#define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020)
-#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
-#define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080)
-#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
-#define CPU_FTR_DBELL ASM_CONST(0x0000000000000200)
-#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
-#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
-#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
-#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
-#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
-#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
-#define CPU_FTR_476_DD2 ASM_CONST(0x0000000000010000)
-#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
-#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
-#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x0000000000080000)
-#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
-#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
-#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
-#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x0000000000800000)
-#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x0000000001000000)
-#define CPU_FTR_SPE ASM_CONST(0x0000000002000000)
-#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000)
-#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000)
-#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000)
-#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000)
-#define CPU_FTR_EMB_HV ASM_CONST(0x0000000040000000)
+#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x00000001)
+#define CPU_FTR_L2CR ASM_CONST(0x00000002)
+#define CPU_FTR_SPEC7450 ASM_CONST(0x00000004)
+#define CPU_FTR_ALTIVEC ASM_CONST(0x00000008)
+#define CPU_FTR_TAU ASM_CONST(0x00000010)
+#define CPU_FTR_CAN_DOZE ASM_CONST(0x00000020)
+#define CPU_FTR_USE_TB ASM_CONST(0x00000040)
+#define CPU_FTR_L2CSR ASM_CONST(0x00000080)
+#define CPU_FTR_601 ASM_CONST(0x00000100)
+#define CPU_FTR_DBELL ASM_CONST(0x00000200)
+#define CPU_FTR_CAN_NAP ASM_CONST(0x00000400)
+#define CPU_FTR_L3CR ASM_CONST(0x00000800)
+#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x00001000)
+#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x00002000)
+#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x00004000)
+#define CPU_FTR_NO_DPM ASM_CONST(0x00008000)
+#define CPU_FTR_476_DD2 ASM_CONST(0x00010000)
+#define CPU_FTR_NEED_COHERENT ASM_CONST(0x00020000)
+#define CPU_FTR_NO_BTIC ASM_CONST(0x00040000)
+#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x00080000)
+#define CPU_FTR_NODSISRALIGN ASM_CONST(0x00100000)
+#define CPU_FTR_PPC_LE ASM_CONST(0x00200000)
+#define CPU_FTR_REAL_LE ASM_CONST(0x00400000)
+#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x00800000)
+#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x01000000)
+#define CPU_FTR_SPE ASM_CONST(0x02000000)
+#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x04000000)
+#define CPU_FTR_LWSYNC ASM_CONST(0x08000000)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x10000000)
+#define CPU_FTR_INDEXED_DCR ASM_CONST(0x20000000)
+#define CPU_FTR_EMB_HV ASM_CONST(0x40000000)
/*
* Add the 64-bit processor unique features in the top half of the word;
@@ -158,29 +159,34 @@ extern const char *powerpc_base_platform;
#define LONG_ASM_CONST(x) 0
#endif
-#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000200000000)
-#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000400000000)
-#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000800000000)
-#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000)
-#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
-#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
-#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000)
-#define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000)
-#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000)
-#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000)
-#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000)
-#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000)
-#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000)
-#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000)
-#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000)
-#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000)
-#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000)
-#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000)
-#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000)
-#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000)
-#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000)
-#define CPU_FTR_ICSWX LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x2000000000000000)
+#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000100000000)
+#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000200000000)
+#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000400000000)
+#define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000800000000)
+#define CPU_FTR_IABR LONG_ASM_CONST(0x0000001000000000)
+#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000002000000000)
+#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000004000000000)
+#define CPU_FTR_SMT LONG_ASM_CONST(0x0000008000000000)
+#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000010000000000)
+#define CPU_FTR_PURR LONG_ASM_CONST(0x0000020000000000)
+#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000040000000000)
+#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000080000000000)
+#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000100000000000)
+#define CPU_FTR_VSX LONG_ASM_CONST(0x0000200000000000)
+#define CPU_FTR_SAO LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000800000000000)
+#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0001000000000000)
+#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0002000000000000)
+#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0004000000000000)
+#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0008000000000000)
+#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0010000000000000)
+#define CPU_FTR_ICSWX LONG_ASM_CONST(0x0020000000000000)
+#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0040000000000000)
+#define CPU_FTR_TM LONG_ASM_CONST(0x0080000000000000)
+#define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000)
+#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000)
+#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#ifndef __ASSEMBLY__
@@ -226,6 +232,15 @@ extern const char *powerpc_base_platform;
#define PPC_FEATURE_HAS_EFP_DOUBLE_COMP 0
#endif
+/* We only set the TM feature if the kernel was compiled with TM support */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+#define CPU_FTR_TM_COMP CPU_FTR_TM
+#define PPC_FEATURE2_HTM_COMP PPC_FEATURE2_HTM
+#else
+#define CPU_FTR_TM_COMP 0
+#define PPC_FEATURE2_HTM_COMP 0
+#endif
+
/* We need to mark all pages as being coherent if we're SMP or we have a
* 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II
* require it for PCI "streaming/prefetch" to work properly.
@@ -391,19 +406,20 @@ extern const char *powerpc_base_platform;
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
- CPU_FTR_HVMODE)
+ CPU_FTR_HVMODE | CPU_FTR_DABRX)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
- CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
- CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
+ CPU_FTR_DABRX)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -411,7 +427,8 @@ extern const char *powerpc_base_platform;
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY)
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
+ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX)
#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -419,19 +436,22 @@ extern const char *powerpc_base_platform;
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY)
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
+ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
- CPU_FTR_UNALIGNED_LD_STD)
+ CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_PURR | CPU_FTR_REAL_LE)
+ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
- CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX)
+ CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_ICSWX | CPU_FTR_DABRX)
#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3E
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 483733b..607559a 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in
* the same units as the timebase. Otherwise we measure cpu time
* in jiffies using the generic definitions.
*/
@@ -16,7 +16,7 @@
#ifndef __POWERPC_CPUTIME_H
#define __POWERPC_CPUTIME_H
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm-generic/cputime.h>
#ifdef __KERNEL__
static inline void setup_cputime_one_jiffy(void) { }
@@ -231,5 +231,5 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
#endif /* __KERNEL__ */
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#endif /* __POWERPC_CPUTIME_H */
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 607e4ee..5fa6b20 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -28,8 +28,36 @@ enum ppc_dbell {
PPC_G_DBELL = 2, /* guest doorbell */
PPC_G_DBELL_CRIT = 3, /* guest critical doorbell */
PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */
+ PPC_DBELL_SERVER = 5, /* doorbell on server */
};
+#ifdef CONFIG_PPC_BOOK3S
+
+#define PPC_DBELL_MSGTYPE PPC_DBELL_SERVER
+#define SPRN_DOORBELL_CPUTAG SPRN_TIR
+#define PPC_DBELL_TAG_MASK 0x7f
+
+static inline void _ppc_msgsnd(u32 msg)
+{
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ else
+ __asm__ __volatile__ (PPC_MSGSNDP(%0) : : "r" (msg));
+}
+
+#else /* CONFIG_PPC_BOOK3S */
+
+#define PPC_DBELL_MSGTYPE PPC_DBELL
+#define SPRN_DOORBELL_CPUTAG SPRN_PIR
+#define PPC_DBELL_TAG_MASK 0x3fff
+
+static inline void _ppc_msgsnd(u32 msg)
+{
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+}
+
+#endif /* CONFIG_PPC_BOOK3S */
+
extern void doorbell_cause_ipi(int cpu, unsigned long data);
extern void doorbell_exception(struct pt_regs *regs);
extern void doorbell_setup_this_cpu(void);
@@ -39,7 +67,7 @@ static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
u32 msg = PPC_DBELL_TYPE(type) | (flags & PPC_DBELL_MSG_BRDCAST) |
(tag & 0x07ffffff);
- __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ _ppc_msgsnd(msg);
}
#endif /* _ASM_POWERPC_DBELL_H */
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index 32de257..d251630 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -4,6 +4,8 @@
#ifndef _ASM_POWERPC_DEBUG_H
#define _ASM_POWERPC_DEBUG_H
+#include <asm/hw_breakpoint.h>
+
struct pt_regs;
extern struct dentry *powerpc_debugfs_root;
@@ -15,7 +17,7 @@ extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
-extern int (*__debugger_dabr_match)(struct pt_regs *regs);
+extern int (*__debugger_break_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);
#define DEBUGGER_BOILERPLATE(__NAME) \
@@ -31,7 +33,7 @@ DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
-DEBUGGER_BOILERPLATE(debugger_dabr_match)
+DEBUGGER_BOILERPLATE(debugger_break_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
#else
@@ -40,17 +42,18 @@ static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_break_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
-extern int set_dabr(unsigned long dabr, unsigned long dabrx);
+int set_breakpoint(struct arch_hw_breakpoint *brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
extern void do_send_trap(struct pt_regs *regs, unsigned long address,
unsigned long error_code, int signal_code, int brkpt);
#else
-extern void do_dabr(struct pt_regs *regs, unsigned long address,
- unsigned long error_code);
+
+extern void do_break(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code);
#endif
#endif /* _ASM_POWERPC_DEBUG_H */
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
index f6813e9..a5c6d83 100644
--- a/arch/powerpc/include/asm/dma.h
+++ b/arch/powerpc/include/asm/dma.h
@@ -16,10 +16,6 @@
*
* None of this really applies for Power Macintoshes. There is
* basically just enough here to get kernel/dma.c to compile.
- *
- * There may be some comments or restrictions made here which are
- * not valid for the PReP platform. Take what you read
- * with a grain of salt.
*/
#include <asm/io.h>
@@ -57,7 +53,6 @@
* - page registers for 5-7 don't use data bit 0, represent 128K pages
* - page registers for 0-3 use bit 0, represent 64K pages
*
- * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.
* On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
* Note that addresses loaded into registers must be _physical_ addresses,
* not logical addresses (which may differ if paging is active).
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index a8fb03e..a80e32b4 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -201,6 +201,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev);
void __init eeh_addr_cache_build(void);
void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_tree_late(struct pci_bus *);
+void eeh_add_sysfs_files(struct pci_bus *);
void eeh_remove_bus_device(struct pci_dev *, int);
/**
@@ -240,6 +241,8 @@ static inline void eeh_add_device_tree_early(struct device_node *dn) { }
static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
+static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
+
static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { }
static inline void eeh_lock(void) { }
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 6abf0a1..cc0655a 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -61,6 +61,7 @@ typedef elf_vrregset_t elf_fpxregset_t;
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
# define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
+# define ELF_HWCAP2 (cur_cpu_spec->cpu_user_features2)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
@@ -103,8 +104,6 @@ do { \
# define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \
(exec_stk == EXSTACK_DEFAULT) : 0)
#else
-# define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
# define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index ad708dd..46793b5 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -47,9 +47,10 @@
#define EX_R3 64
#define EX_LR 72
#define EX_CFAR 80
+#define EX_PPR 88 /* SMT thread status register (priority) */
#ifdef CONFIG_RELOCATABLE
-#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label); \
@@ -60,13 +61,15 @@
blr;
#else
/* If not relocatable, we can jump directly -- and save messing with LR */
-#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
li r10,MSR_RI; \
mtmsrd r10,1; /* Set RI (EE=0) */ \
b label;
#endif
+#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+ __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
/*
* As EXCEPTION_PROLOG_PSERIES(), except we've already got relocation on
@@ -74,6 +77,7 @@
* case EXCEPTION_RELON_PROLOG_PSERIES_1 will be using lr.
*/
#define EXCEPTION_RELON_PROLOG_PSERIES(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_0(area); \
EXCEPTION_PROLOG_1(area, extra, vec); \
EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
@@ -107,14 +111,59 @@
#define RESTORE_LR(reg, area)
#endif
-#define __EXCEPTION_PROLOG_1(area, extra, vec) \
+/*
+ * PPR save/restore macros used in exceptions_64s.S
+ * Used for P7 or later processors
+ */
+#define SAVE_PPR(area, ra, rb) \
+BEGIN_FTR_SECTION_NESTED(940) \
+ ld ra,PACACURRENT(r13); \
+ ld rb,area+EX_PPR(r13); /* Read PPR from paca */ \
+ std rb,TASKTHREADPPR(ra); \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
+
+#define RESTORE_PPR_PACA(area, ra) \
+BEGIN_FTR_SECTION_NESTED(941) \
+ ld ra,area+EX_PPR(r13); \
+ mtspr SPRN_PPR,ra; \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
+
+/*
+ * Increase the priority on systems where PPR save/restore is not
+ * implemented/supported.
+ */
+#define HMT_MEDIUM_PPR_DISCARD \
+BEGIN_FTR_SECTION_NESTED(942) \
+ HMT_MEDIUM; \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,0,942) /*non P7*/
+
+/*
+ * Get an SPR into a register if the CPU has the given feature
+ */
+#define OPT_GET_SPR(ra, spr, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+ mfspr ra,spr; \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+/*
+ * Save a register to the PACA if the CPU has the given feature
+ */
+#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+ std ra,offset(r13); \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+#define EXCEPTION_PROLOG_0(area) \
GET_PACA(r13); \
- std r9,area+EX_R9(r13); /* save r9 - r12 */ \
- std r10,area+EX_R10(r13); \
- BEGIN_FTR_SECTION_NESTED(66); \
- mfspr r10,SPRN_CFAR; \
- std r10,area+EX_CFAR(r13); \
- END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
+ std r9,area+EX_R9(r13); /* save r9 */ \
+ OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \
+ HMT_MEDIUM; \
+ std r10,area+EX_R10(r13); /* save r10 - r12 */ \
+ OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
+
+#define __EXCEPTION_PROLOG_1(area, extra, vec) \
+ OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
+ OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
SAVE_LR(r10, area); \
mfcr r9; \
extra(vec); \
@@ -139,6 +188,7 @@
__EXCEPTION_PROLOG_PSERIES_1(label, h)
#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_0(area); \
EXCEPTION_PROLOG_1(area, extra, vec); \
EXCEPTION_PROLOG_PSERIES_1(label, h);
@@ -149,10 +199,14 @@
#define __KVM_HANDLER(area, h, n) \
do_kvm_##n: \
+ BEGIN_FTR_SECTION_NESTED(947) \
+ ld r10,area+EX_CFAR(r13); \
+ std r10,HSTATE_CFAR(r13); \
+ END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \
ld r10,area+EX_R10(r13); \
- stw r9,HSTATE_SCRATCH1(r13); \
+ stw r9,HSTATE_SCRATCH1(r13); \
ld r9,area+EX_R9(r13); \
- std r12,HSTATE_SCRATCH0(r13); \
+ std r12,HSTATE_SCRATCH0(r13); \
li r12,n; \
b kvmppc_interrupt
@@ -224,8 +278,10 @@ do_kvm_##n: \
std r10,0(r1); /* make stack chain pointer */ \
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r10,GPR1(r1); /* save r1 in stackframe */ \
+ beq 4f; /* if from kernel mode */ \
ACCOUNT_CPU_USER_ENTRY(r9, r10); \
- std r2,GPR2(r1); /* save r2 in stackframe */ \
+ SAVE_PPR(area, r9, r10); \
+4: std r2,GPR2(r1); /* save r2 in stackframe */ \
SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
@@ -266,45 +322,74 @@ do_kvm_##n: \
. = loc; \
.globl label##_pSeries; \
label##_pSeries: \
- HMT_MEDIUM; \
+ HMT_MEDIUM_PPR_DISCARD; \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
EXC_STD, KVMTEST_PR, vec)
+/* Version of above for when we have to branch out-of-line */
+#define STD_EXCEPTION_PSERIES_OOL(vec, label) \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD)
+
#define STD_EXCEPTION_HV(loc, vec, label) \
. = loc; \
.globl label##_hv; \
label##_hv: \
- HMT_MEDIUM; \
+ HMT_MEDIUM_PPR_DISCARD; \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
EXC_HV, KVMTEST, vec)
+/* Version of above for when we have to branch out-of-line */
+#define STD_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_hv; \
+label##_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV)
+
#define STD_RELON_EXCEPTION_PSERIES(loc, vec, label) \
. = loc; \
.globl label##_relon_pSeries; \
label##_relon_pSeries: \
- HMT_MEDIUM; \
+ HMT_MEDIUM_PPR_DISCARD; \
/* No guest interrupts come through here */ \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
EXC_STD, KVMTEST_PR, vec)
+#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \
+ .globl label##_relon_pSeries; \
+label##_relon_pSeries: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)
+
#define STD_RELON_EXCEPTION_HV(loc, vec, label) \
. = loc; \
.globl label##_relon_hv; \
label##_relon_hv: \
- HMT_MEDIUM; \
+ HMT_MEDIUM_PPR_DISCARD; \
/* No guest interrupts come through here */ \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
EXC_HV, KVMTEST, vec)
+#define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_relon_hv; \
+label##_relon_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)
+
/* This associate vector numbers with bits in paca->irq_happened */
#define SOFTEN_VALUE_0x500 PACA_IRQ_EE
#define SOFTEN_VALUE_0x502 PACA_IRQ_EE
#define SOFTEN_VALUE_0x900 PACA_IRQ_DEC
#define SOFTEN_VALUE_0x982 PACA_IRQ_DEC
+#define SOFTEN_VALUE_0xa00 PACA_IRQ_DBELL
+#define SOFTEN_VALUE_0xe80 PACA_IRQ_DBELL
+#define SOFTEN_VALUE_0xe82 PACA_IRQ_DBELL
#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
@@ -329,10 +414,11 @@ label##_relon_hv: \
#define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec)
#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
- HMT_MEDIUM; \
SET_SCRATCH0(r13); /* save r13 */ \
- __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
+ EXCEPTION_PROLOG_0(PACA_EXGEN); \
+ __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
EXCEPTION_PROLOG_PSERIES_1(label##_common, h);
+
#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
__MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)
@@ -340,6 +426,7 @@ label##_relon_hv: \
. = loc; \
.globl label##_pSeries; \
label##_pSeries: \
+ HMT_MEDIUM_PPR_DISCARD; \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
EXC_STD, SOFTEN_TEST_PR)
@@ -350,9 +437,16 @@ label##_hv: \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
EXC_HV, SOFTEN_TEST_HV)
+#define MASKABLE_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_hv; \
+label##_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
+
#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
- HMT_MEDIUM; \
+ HMT_MEDIUM_PPR_DISCARD; \
SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_PROLOG_0(PACA_EXGEN); \
__EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, h);
#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
@@ -372,6 +466,12 @@ label##_relon_hv: \
_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
EXC_HV, SOFTEN_NOTEST_HV)
+#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_relon_hv; \
+label##_relon_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
+
/*
* Our exception common code can be passed various "additions"
* to specify the behaviour of interrupts, whether to kick the
@@ -413,7 +513,7 @@ label##_common: \
*/
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
- FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
+ FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
/*
* When the idle code in power4_idle puts the CPU into NAP mode,
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 973cc3b..681bc03 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -18,7 +18,6 @@
#include <asm/feature-fixups.h>
/* firmware feature bitmask values */
-#define FIRMWARE_MAX_FEATURES 63
#define FW_FEATURE_PFT ASM_CONST(0x0000000000000001)
#define FW_FEATURE_TCE ASM_CONST(0x0000000000000002)
@@ -50,6 +49,10 @@
#define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000)
#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000)
#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
+#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
+#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
+#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
+#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000)
#ifndef __ASSEMBLY__
@@ -64,9 +67,11 @@ enum {
FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
- FW_FEATURE_SET_MODE,
+ FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
+ FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
FW_FEATURE_PSERIES_ALWAYS = 0,
- FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
+ FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
+ FW_FEATURE_OPALv3,
FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 3147a29..3bdcfce 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -10,6 +10,9 @@ typedef struct {
unsigned int pmu_irqs;
unsigned int mce_exceptions;
unsigned int spurious_irqs;
+#ifdef CONFIG_PPC_DOORBELL
+ unsigned int doorbell_irqs;
+#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 62e11a3..f2498c8 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -3,9 +3,37 @@
#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
extern struct kmem_cache *hugepte_cache;
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * This should work for other subarchs too. But right now we use the
+ * new format only for 64bit book3s
+ */
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+ BUG_ON(!hugepd_ok(hpd));
+ /*
+ * We only have four bits to encode the MMU page size
+ */
+ BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
+ return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK);
+}
+
+static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
+{
+ return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+ return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
+}
+
+#else
+
static inline pte_t *hugepd_page(hugepd_t hpd)
{
BUG_ON(!hugepd_ok(hpd));
@@ -17,6 +45,9 @@ static inline unsigned int hugepd_shift(hugepd_t hpd)
return hpd.pd & HUGEPD_SHIFT_MASK;
}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
unsigned pdshift)
{
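
The new book3s-64 hugepd layout above packs the MMU page-size index into bits 2-5 of the entry while the low two bits stay zero. A minimal stand-alone C sketch of that encode/decode follows; HUGEPD_SHIFT_MASK is assumed to be 0x3f here (the real value lives in asm/page.h) and hugepd_encode() is a hypothetical helper, not a kernel function.

#include <assert.h>

#define HUGEPD_SHIFT_MASK 0x3fUL		/* assumed value, see asm/page.h */

/* hypothetical encoder: pointer in the high bits, psize index in bits 2-5 */
static unsigned long hugepd_encode(unsigned long pte_ptr, unsigned int psize)
{
	assert((pte_ptr & HUGEPD_SHIFT_MASK) == 0);	/* pointer is aligned */
	assert(psize <= 0xf);				/* only four bits available */
	return pte_ptr | (psize << 2);			/* bits 1:0 remain zero */
}

int main(void)
{
	unsigned long hpd = hugepd_encode(0x10000UL, 7);

	/* mirrors hugepd_mmu_psize() and hugepd_page() from the hunk above */
	assert(((hpd & HUGEPD_SHIFT_MASK) >> 2) == 7);
	assert((hpd & ~HUGEPD_SHIFT_MASK) == 0x10000UL);
	assert((hpd & 0x3) == 0 && (hpd & HUGEPD_SHIFT_MASK) != 0);	/* hugepd_ok() */
	return 0;
}
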
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 0975e5c..0c7f2bf 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -264,12 +264,16 @@
#define H_GET_MPP 0x2D4
#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
#define H_BEST_ENERGY 0x2F4
+#define H_XIRR_X 0x2FC
#define H_RANDOM 0x300
#define H_COP 0x304
#define H_GET_MPP_X 0x314
#define H_SET_MODE 0x31C
#define MAX_HCALL_OPCODE H_SET_MODE
+/* Platform specific hcalls, used by KVM */
+#define H_RTAS 0xf000
+
#ifndef __ASSEMBLY__
/**
@@ -395,6 +399,15 @@ static inline unsigned long cmo_get_page_size(void)
{
return CMO_PageSize;
}
+
+extern long pSeries_enable_reloc_on_exc(void);
+extern long pSeries_disable_reloc_on_exc(void);
+
+#else
+
+#define pSeries_enable_reloc_on_exc() do {} while (0)
+#define pSeries_disable_reloc_on_exc() do {} while (0)
+
#endif /* CONFIG_PPC_PSERIES */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 4234245..eb0f4ac 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -24,16 +24,30 @@
#define _PPC_BOOK3S_64_HW_BREAKPOINT_H
#ifdef __KERNEL__
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-
struct arch_hw_breakpoint {
unsigned long address;
- unsigned long dabrx;
- int type;
- u8 len; /* length of the target data symbol */
- bool extraneous_interrupt;
+ u16 type;
+ u16 len; /* length of the target data symbol */
};
+/* Note: Don't change the first 6 bits below as they are in the same order
+ * as the dabr and dabrx.
+ */
+#define HW_BRK_TYPE_READ 0x01
+#define HW_BRK_TYPE_WRITE 0x02
+#define HW_BRK_TYPE_TRANSLATE 0x04
+#define HW_BRK_TYPE_USER 0x08
+#define HW_BRK_TYPE_KERNEL 0x10
+#define HW_BRK_TYPE_HYP 0x20
+#define HW_BRK_TYPE_EXTRANEOUS_IRQ 0x80
+
+/* bits that overlap with the bottom 3 bits of the dabr */
+#define HW_BRK_TYPE_RDWR (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)
+#define HW_BRK_TYPE_DABR (HW_BRK_TYPE_RDWR | HW_BRK_TYPE_TRANSLATE)
+#define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
+ HW_BRK_TYPE_HYP)
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <linux/kdebug.h>
#include <asm/reg.h>
#include <asm/debug.h>
@@ -43,8 +57,6 @@ struct pmu;
struct perf_sample_data;
#define HW_BREAKPOINT_ALIGN 0x7
-/* Maximum permissible length of any HW Breakpoint */
-#define HW_BREAKPOINT_LEN 0x8
extern int hw_breakpoint_slots(int type);
extern int arch_bp_generic_fields(int type, int *gen_bp_type);
@@ -62,7 +74,12 @@ extern void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs);
static inline void hw_breakpoint_disable(void)
{
- set_dabr(0, 0);
+ struct arch_hw_breakpoint brk;
+
+ brk.address = 0;
+ brk.type = 0;
+ brk.len = 0;
+ set_breakpoint(&brk);
}
extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
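
Since the comment above ties the low bits of HW_BRK_TYPE_* to the DABR/DABRX bit order, a quick stand-alone check (plain C, not kernel code) of how the composite masks decompose may help; the constant values are copied from the hunk above.

#include <assert.h>

#define HW_BRK_TYPE_READ	0x01
#define HW_BRK_TYPE_WRITE	0x02
#define HW_BRK_TYPE_TRANSLATE	0x04
#define HW_BRK_TYPE_USER	0x08
#define HW_BRK_TYPE_KERNEL	0x10
#define HW_BRK_TYPE_HYP		0x20

#define HW_BRK_TYPE_RDWR	(HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)
#define HW_BRK_TYPE_DABR	(HW_BRK_TYPE_RDWR | HW_BRK_TYPE_TRANSLATE)
#define HW_BRK_TYPE_PRIV_ALL	(HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | HW_BRK_TYPE_HYP)

int main(void)
{
	/* the three DABR-compatible bits sit below the three privilege bits */
	assert(HW_BRK_TYPE_DABR == 0x07);
	assert(HW_BRK_TYPE_PRIV_ALL == 0x38);
	/* a classic "any access, any privilege" breakpoint type */
	assert((HW_BRK_TYPE_DABR | HW_BRK_TYPE_PRIV_ALL) == 0x3f);
	return 0;
}
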
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index f94ef42..dd15e5e 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -15,10 +15,6 @@
extern int check_legacy_ioport(unsigned long base_port);
#define I8042_DATA_REG 0x60
#define FDC_BASE 0x3f0
-/* only relevant for PReP */
-#define _PIDXR 0x279
-#define _PNPWRP 0xa79
-#define PNPBIOS_BASE 0xf000
#if defined(CONFIG_PPC64) && defined(CONFIG_PCI)
extern struct pci_dev *isa_bridge_pcidev;
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index b9dd382..6e9f858 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -54,8 +54,16 @@
#define BOOKE_INTERRUPT_DEBUG 15
/* E500 */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
-#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
+/*
+ * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
+ */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
+ BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
#define BOOKE_INTERRUPT_DOORBELL 36
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bc81842..349ed85 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -142,6 +142,8 @@ extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+ unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
@@ -156,7 +158,8 @@ void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
unsigned long *nb_ret);
-extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
+extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
+ unsigned long gpa, bool dirty);
extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 38bec1d..9c1ff33 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -268,4 +268,17 @@ static inline int is_vrma_hpte(unsigned long hpte_v)
(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+/*
+ * Note modification of an HPTE; set the HPTE modified bit
+ * if anyone is interested.
+ */
+static inline void note_hpte_modification(struct kvm *kvm,
+ struct revmap_entry *rev)
+{
+ if (atomic_read(&kvm->arch.hpte_mod_interest))
+ rev->guest_rpte |= HPTE_GR_MODIFIED;
+}
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 88609b2..9039d3c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -20,6 +20,11 @@
#ifndef __ASM_KVM_BOOK3S_ASM_H__
#define __ASM_KVM_BOOK3S_ASM_H__
+/* XICS ICP register offsets */
+#define XICS_XIRR 4
+#define XICS_MFRR 0xc
+#define XICS_IPI 2 /* interrupt source # for IPIs */
+
#ifdef __ASSEMBLY__
#ifdef CONFIG_KVM_BOOK3S_HANDLER
@@ -81,10 +86,11 @@ struct kvmppc_host_state {
#ifdef CONFIG_KVM_BOOK3S_64_HV
u8 hwthread_req;
u8 hwthread_state;
-
+ u8 host_ipi;
struct kvm_vcpu *kvm_vcpu;
struct kvmppc_vcore *kvm_vcore;
unsigned long xics_phys;
+ u32 saved_xirr;
u64 dabr;
u64 host_mmcr[3];
u32 host_pmc[8];
@@ -93,6 +99,9 @@ struct kvmppc_host_state {
u64 host_dscr;
u64 dec_expires;
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u64 cfar;
+#endif
};
struct kvmppc_book3s_shadow_vcpu {
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 25a8d52..8a9d694 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -37,10 +37,8 @@
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
-#define KVM_MEMORY_SLOTS 32
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
-#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#define KVM_USER_MEM_SLOTS 32
+#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS
#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -195,6 +193,10 @@ struct kvmppc_linear_info {
int type;
};
+/* XICS components, defined in book3s_xics.c */
+struct kvmppc_xics;
+struct kvmppc_icp;
+
/*
* The reverse mapping array has one entry for each HPTE,
* which stores the guest's view of the second word of the HPTE
@@ -266,10 +268,14 @@ struct kvm_arch {
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
+ struct list_head rtas_tokens;
#endif
#ifdef CONFIG_KVM_MPIC
struct openpic *mpic;
#endif
+#ifdef CONFIG_KVM_XICS
+ struct kvmppc_xics *xics;
+#endif
};
/*
@@ -315,11 +321,13 @@ struct kvmppc_vcore {
* that a guest can register.
*/
struct kvmppc_vpa {
+ unsigned long gpa; /* Current guest phys addr */
void *pinned_addr; /* Address in kernel linear mapping */
void *pinned_end; /* End of region */
unsigned long next_gpa; /* Guest phys addr for update */
unsigned long len; /* Number of bytes required */
u8 update_pending; /* 1 => update pinned_addr from next_gpa */
+ bool dirty; /* true => area has been modified by kernel */
};
struct kvmppc_pte {
@@ -391,6 +399,7 @@ struct kvmppc_booke_debug_reg {
#define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC 1
+#define KVMPPC_IRQ_XICS 2
struct openpic;
@@ -464,6 +473,7 @@ struct kvm_vcpu_arch {
ulong uamor;
u32 ctrl;
ulong dabr;
+ ulong cfar;
#endif
u32 vrsave; /* also USPRG0 */
u32 mmucr;
@@ -587,6 +597,9 @@ struct kvm_vcpu_arch {
int irq_type; /* one of KVM_IRQ_* */
int irq_cpu_id;
struct openpic *mpic; /* KVM_IRQ_MPIC */
+#ifdef CONFIG_KVM_XICS
+ struct kvmppc_icp *icp; /* XICS presentation controller */
+#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
struct kvm_vcpu_arch_shared shregs;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 0c4f5f2..e4474f8 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -130,6 +130,7 @@ extern long kvmppc_prepare_vrma(struct kvm *kvm,
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
+
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
@@ -151,7 +152,7 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old);
+ const struct kvm_memory_slot *old);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
@@ -166,6 +167,16 @@ extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
+extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
+extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
+extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
+extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority);
+extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority);
+extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
+
/*
* Cuts out inst bits with ordering according to spec.
* That means the leftmost bit is zero. All given bits are included.
@@ -255,6 +266,21 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
paca[cpu].kvm_hstate.xics_phys = addr;
}
+static inline u32 kvmppc_get_xics_latch(void)
+{
+ u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+
+ get_paca()->kvm_hstate.saved_xirr = 0;
+
+ return xirr;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{
+ paca[cpu].kvm_hstate.host_ipi = host_ipi;
+}
+
+extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
extern void kvm_linear_init(void);
#else
@@ -263,6 +289,46 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
static inline void kvm_linear_init(void)
{}
+
+static inline u32 kvmppc_get_xics_latch(void)
+{
+ return 0;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{}
+
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_kick(vcpu);
+}
+#endif
+
+#ifdef CONFIG_KVM_XICS
+static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
+}
+extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
+extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
+extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
+extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu);
+#else
+static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
+ { return 0; }
+static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
+ unsigned long server)
+ { return -EINVAL; }
+static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
+ struct kvm_irq_level *args)
+ { return -ENOTTY; }
+static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
+ { return 0; }
#endif
static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
@@ -371,4 +437,6 @@ static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
return ea;
}
+extern void xics_wake_cpu(int cpu);
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
new file mode 100644
index 0000000..b36f650
--- /dev/null
+++ b/arch/powerpc/include/asm/linkage.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_POWERPC_LINKAGE_H
+#define _ASM_POWERPC_LINKAGE_H
+
+#ifdef CONFIG_PPC64
+#define cond_syscall(x) \
+ asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
+ "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
+#define SYSCALL_ALIAS(alias, name) \
+ asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
+ "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
+#endif
+
+#endif /* _ASM_POWERPC_LINKAGE_H */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 531fe0c..b1e7f2a 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -145,7 +145,7 @@ struct dtl_entry {
extern struct kmem_cache *dtl_cache;
/*
- * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls
+ * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
* reading from the dispatch trace log. If other code wants to consume
* DTL entries, it can set this pointer to a function that will get
* called once for each DTL entry that gets processed.
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index ce449e0..536aded 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -30,6 +30,7 @@ struct file;
struct pci_controller;
struct kimage;
struct msi_region;
+struct pci_host_bridge;
struct machdep_calls {
char *name;
@@ -51,7 +52,8 @@ struct machdep_calls {
unsigned long prpn,
unsigned long rflags,
unsigned long vflags,
- int psize, int ssize);
+ int psize, int apsize,
+ int ssize);
long (*hpte_remove)(unsigned long hpte_group);
void (*hpte_removebolted)(unsigned long ea,
int psize, int ssize);
@@ -108,6 +110,8 @@ struct machdep_calls {
void (*pcibios_fixup)(void);
int (*pci_probe_mode)(struct pci_bus *);
void (*pci_irq_fixup)(struct pci_dev *dev);
+ int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
+ *bridge);
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
@@ -188,6 +192,10 @@ struct machdep_calls {
int (*set_dabr)(unsigned long dabr,
unsigned long dabrx);
+ /* Set DAWR for this platform, leave empty for default implementation */
+ int (*set_dawr)(unsigned long dawr,
+ unsigned long dawrx);
+
#ifdef CONFIG_PPC32 /* XXX for now */
/* A general init function, called by ppc_init in init/main.c.
May be NULL. */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index dcc733c..ff53ea6 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -220,6 +220,7 @@
#define TLBILX_T_CLASS3 7
#ifndef __ASSEMBLY__
+#include <asm/bug.h>
extern unsigned int tlbcam_index;
@@ -236,6 +237,10 @@ typedef struct {
u64 high_slices_psize; /* 4 bits per slice for now */
u16 user_psize; /* page size index */
#endif
+#ifdef CONFIG_PPC_64K_PAGES
+ /* for 4K PTE fragment support */
+ void *pte_frag;
+#endif
} mm_context_t;
/* Page size definitions, common between 32 and 64-bit
@@ -255,6 +260,23 @@ struct mmu_psize_def
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
+ return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+ if (mmu_psize_defs[mmu_psize].shift)
+ return mmu_psize_defs[mmu_psize].shift;
+ BUG();
+}
+
/* The page sizes use the same names as 64-bit hash but are
* constants
*/
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2fdb47a..2accc96 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -21,6 +21,7 @@
* complete pgtable.h but only a portion of it.
*/
#include <asm/pgtable-ppc64.h>
+#include <asm/bug.h>
/*
* Segment table
@@ -154,11 +155,29 @@ extern unsigned long htab_hash_mask;
struct mmu_psize_def
{
unsigned int shift; /* number of bits */
- unsigned int penc; /* HPTE encoding */
+ int penc[MMU_PAGE_COUNT]; /* HPTE encoding */
unsigned int tlbiel; /* tlbiel supported for that page size */
unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
};
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
+ return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+ if (mmu_psize_defs[mmu_psize].shift)
+ return mmu_psize_defs[mmu_psize].shift;
+ BUG();
+}
#endif /* __ASSEMBLY__ */
@@ -181,6 +200,13 @@ struct mmu_psize_def
*/
#define VPN_SHIFT 12
+/*
+ * HPTE Large Page (LP) details
+ */
+#define LP_SHIFT 12
+#define LP_BITS 8
+#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
+
#ifndef __ASSEMBLY__
static inline int segment_shift(int ssize)
@@ -193,7 +219,6 @@ static inline int segment_shift(int ssize)
/*
* The current system page and segment sizes
*/
-extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
@@ -237,14 +262,14 @@ static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
/*
* This function sets the AVPN and L fields of the HPTE appropriately
- * for the page size
+ * using the base page size and actual page size.
*/
-static inline unsigned long hpte_encode_v(unsigned long vpn,
- int psize, int ssize)
+static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
+ int actual_psize, int ssize)
{
unsigned long v;
- v = hpte_encode_avpn(vpn, psize, ssize);
- if (psize != MMU_PAGE_4K)
+ v = hpte_encode_avpn(vpn, base_psize, ssize);
+ if (actual_psize != MMU_PAGE_4K)
v |= HPTE_V_LARGE;
return v;
}
@@ -254,19 +279,17 @@ static inline unsigned long hpte_encode_v(unsigned long vpn,
* for the page size. We assume the pa is already "clean" that is properly
* aligned for the requested page size
*/
-static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
+static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
+ int actual_psize)
{
- unsigned long r;
-
/* A 4K page needs no special encoding */
- if (psize == MMU_PAGE_4K)
+ if (actual_psize == MMU_PAGE_4K)
return pa & HPTE_R_RPN;
else {
- unsigned int penc = mmu_psize_defs[psize].penc;
- unsigned int shift = mmu_psize_defs[psize].shift;
- return (pa & ~((1ul << shift) - 1)) | (penc << 12);
+ unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
+ unsigned int shift = mmu_psize_defs[actual_psize].shift;
+ return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
}
- return r;
}
/*
@@ -319,7 +342,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
unsigned int shift, unsigned int mmu_psize);
extern void hash_failure_debug(unsigned long ea, unsigned long access,
unsigned long vsid, unsigned long trap,
- int ssize, int psize, unsigned long pte);
+ int ssize, int psize, int lpsize,
+ unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long pstart, unsigned long prot,
int psize, int ssize);
@@ -343,17 +367,16 @@ extern void slb_set_size(u16 size);
/*
* VSID allocation (256MB segment)
*
- * We first generate a 38-bit "proto-VSID". For kernel addresses this
- * is equal to the ESID | 1 << 37, for user addresses it is:
- * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
- *
- * This splits the proto-VSID into the below range
- * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
- * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
+ * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
+ * from mmu context id and effective segment id of the address.
*
- * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
- * That is, we assign half of the space to user processes and half
- * to the kernel.
+ * For user processes, the max context id is limited to ((1ul << 19) - 5).
+ * For kernel space, we use the top 4 context ids to map addresses as below.
+ * NOTE: each context only supports 64TB for now.
+ * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
*
* The proto-VSIDs are then scrambled into real VSIDs with the
* multiplicative hash:
@@ -363,41 +386,49 @@ extern void slb_set_size(u16 size);
* VSID_MULTIPLIER is prime, so in particular it is
* co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
* Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
- *
- * This scheme has several advantages over older methods:
+ * a divide or extra multiply (see below). The scramble function gives
+ * robust scattering in the hash table (at least based on some initial
+ * results).
*
- * - We have VSIDs allocated for every kernel address
- * (i.e. everything above 0xC000000000000000), except the very top
- * segment, which simplifies several things.
+ * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
+ * a bad address. This enables us to consolidate bad address handling in
+ * hash_page.
*
- * - We allow for USER_ESID_BITS significant bits of ESID and
- * CONTEXT_BITS bits of context for user addresses.
- * i.e. 64T (46 bits) of address space for up to half a million contexts.
- *
- * - The scramble function gives robust scattering in the hash
- * table (at least based on some initial results). The previous
- * method was more susceptible to pathological cases giving excessive
- * hash collisions.
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+ * because of the modulo operation in vsid scramble. But the vmemmap
+ * (which is what uses region 0xf) will never be close to 64TB in size
+ * (it's 56 bytes per page of system memory).
+ */
+
+#define CONTEXT_BITS 19
+#define ESID_BITS 18
+#define ESID_BITS_1T 6
+
+/*
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
+ * available for user + kernel mapping. The top 4 contexts are used for
+ * kernel mapping. Each segment contains 2^28 bytes. Each
+ * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+ * (19 == 37 + 28 - 46).
*/
+#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
/*
 * This should be computed such that protovsid * vsid_multiplier
* doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
*/
#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_256M 38
+#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_1T 26
+#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
-#define CONTEXT_BITS 19
-#define USER_ESID_BITS 18
-#define USER_ESID_BITS_1T 6
-#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
+#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
/*
* This macro generates asm code to compute the VSID scramble
@@ -421,7 +452,8 @@ extern void slb_set_size(u16 size);
srdi rx,rt,VSID_BITS_##size; \
clrldi rt,rt,(64-VSID_BITS_##size); \
add rt,rt,rx; /* add high and low bits */ \
- /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
+ /* NOTE: explanation based on VSID_BITS_##size = 36 \
+ * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
* 2^36-1+2^28-1. That in particular means that if r3 >= \
* 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
* the bit clear, r3 already has the answer we want, if it \
@@ -490,6 +522,10 @@ typedef struct {
unsigned long acop; /* mask of enabled coprocessor types */
unsigned int cop_pid; /* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
+#ifdef CONFIG_PPC_64K_PAGES
+ /* for 4K PTE fragment support */
+ void *pte_frag;
+#endif
} mm_context_t;
@@ -513,34 +549,6 @@ typedef struct {
})
#endif /* 1 */
-/*
- * This is only valid for addresses >= PAGE_OFFSET
- * The proto-VSID space is divided into two class
- * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
- * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
- *
- * With KERNEL_START at 0xc000000000000000, the proto vsid for
- * the kernel ends up with 0xc00000000 (36 bits). With 64TB
- * support we need to have kernel proto-VSID in the
- * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
- */
-static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
-{
- unsigned long proto_vsid;
- /*
- * We need to make sure proto_vsid for the kernel is
- * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
- */
- if (ssize == MMU_SEGSIZE_256M) {
- proto_vsid = ea >> SID_SHIFT;
- proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
- return vsid_scramble(proto_vsid, 256M);
- }
- proto_vsid = ea >> SID_SHIFT_1T;
- proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
- return vsid_scramble(proto_vsid, 1T);
-}
-
/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
@@ -550,17 +558,41 @@ static inline int user_segment_size(unsigned long addr)
return MMU_SEGSIZE_256M;
}
-/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
int ssize)
{
+ /*
+ * Bad address. We return VSID 0 for that
+ */
+ if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
+ return 0;
+
if (ssize == MMU_SEGSIZE_256M)
- return vsid_scramble((context << USER_ESID_BITS)
+ return vsid_scramble((context << ESID_BITS)
| (ea >> SID_SHIFT), 256M);
- return vsid_scramble((context << USER_ESID_BITS_1T)
+ return vsid_scramble((context << ESID_BITS_1T)
| (ea >> SID_SHIFT_1T), 1T);
}
+/*
+ * This is only valid for addresses >= PAGE_OFFSET
+ *
+ * For kernel space, we use the top 4 context ids to map address as below
+ * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ */
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+{
+ unsigned long context;
+
+ /*
+ * the kernel takes the top 4 contexts from the available range
+ */
+ context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
+ return get_vsid(context, ea, ssize);
+}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
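
The kernel-context arithmetic in the new get_kernel_vsid() is easy to sanity-check outside the kernel: with CONTEXT_BITS = 19 and MAX_USER_CONTEXT = (1 << 19) - 5, the four 0xc-0xf regions land exactly on the top four context ids listed in the comments above. A minimal stand-alone sketch follows; kernel_context() is a hypothetical stand-in for that computation, not a kernel function.

#include <assert.h>

#define CONTEXT_BITS		19
#define MAX_USER_CONTEXT	((1UL << CONTEXT_BITS) - 5)	/* 0x7fffb */

static unsigned long kernel_context(unsigned long ea)
{
	/* same expression as in get_kernel_vsid() above */
	return MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1;
}

int main(void)
{
	assert(kernel_context(0xc000000000000000UL) == 0x7fffc);
	assert(kernel_context(0xd000000000000000UL) == 0x7fffd);
	assert(kernel_context(0xe000000000000000UL) == 0x7fffe);
	assert(kernel_context(0xf000000000000000UL) == 0x7ffff);
	return 0;
}
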
diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h
index 8c0ab2c..885c040 100644
--- a/arch/powerpc/include/asm/mpc5121.h
+++ b/arch/powerpc/include/asm/mpc5121.h
@@ -53,4 +53,21 @@ struct mpc512x_ccm {
u32 m4ccr; /* MSCAN4 CCR */
u8 res[0x98]; /* Reserved */
};
+
+/*
+ * LPC Module
+ */
+struct mpc512x_lpc {
+ u32 cs_cfg[8]; /* CS config */
+ u32 cs_ctrl; /* CS Control Register */
+ u32 cs_status; /* CS Status Register */
+ u32 burst_ctrl; /* CS Burst Control Register */
+ u32 deadcycle_ctrl; /* CS Deadcycle Control Register */
+ u32 holdcycle_ctrl; /* CS Holdcycle Control Register */
+ u32 alt; /* Address Latch Timing Register */
+};
+
+int mpc512x_cs_config(unsigned int cs, u32 val);
+int __init mpc5121_clk_init(void);
+
#endif /* __ASM_POWERPC_MPC5121_H__ */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index a4b28f1..cbb9305 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -117,6 +117,7 @@ extern int opal_enter_rtas(struct rtas_args *args,
#define OPAL_SET_SLOT_LED_STATUS 55
#define OPAL_GET_EPOW_STATUS 56
#define OPAL_SET_SYSTEM_ATTENTION_LED 57
+#define OPAL_PCI_MSI_EOI 63
#ifndef __ASSEMBLY__
@@ -242,7 +243,8 @@ enum OpalMCE_TlbErrorType {
enum OpalThreadStatus {
OPAL_THREAD_INACTIVE = 0x0,
- OPAL_THREAD_STARTED = 0x1
+ OPAL_THREAD_STARTED = 0x1,
+ OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
};
enum OpalPciBusCompare {
@@ -506,6 +508,7 @@ int64_t opal_pci_get_xive_reissue(uint64_t phb_id, uint32_t xive_number,
uint8_t *p_bit, uint8_t *q_bit);
int64_t opal_pci_set_xive_reissue(uint64_t phb_id, uint32_t xive_number,
uint8_t p_bit, uint8_t q_bit);
+int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
uint32_t xive_num);
int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
@@ -561,6 +564,8 @@ extern void opal_nvram_init(void);
extern int opal_machine_check(struct pt_regs *regs);
+extern void opal_shutdown(void);
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 6733e86..7b6d603 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -93,9 +93,9 @@ struct paca_struct {
* Now, starting in cacheline 2, the exception save areas
*/
/* used for most interrupts/exceptions */
- u64 exgen[11] __attribute__((aligned(0x80)));
- u64 exmc[11]; /* used for machine checks */
- u64 exslb[11]; /* used for SLB/segment table misses
+ u64 exgen[12] __attribute__((aligned(0x80)));
+ u64 exmc[12]; /* used for machine checks */
+ u64 exslb[12]; /* used for SLB/segment table misses
* on the linear mapping */
/* SLB related definitions */
u16 vmalloc_sllp;
@@ -156,6 +156,9 @@ struct paca_struct {
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
u8 nap_state_lost; /* NV GPR values lost in power7_idle */
u64 sprg3; /* Saved user-visible sprg */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u64 tm_scratch; /* TM scratch area for reclaim */
+#endif
#ifdef CONFIG_PPC_POWERNV
/* Pointer to OPAL machine check event structure set by the
@@ -186,7 +189,6 @@ struct paca_struct {
};
extern struct paca_struct *paca;
-extern __initdata struct paca_struct boot_paca;
extern void initialise_paca(struct paca_struct *new_paca, int cpu);
extern void setup_paca(struct paca_struct *new_paca);
extern void allocate_pacas(void);
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f072e97..988c812 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -249,6 +249,7 @@ extern long long virt_phys_offset;
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
#endif
+#ifndef CONFIG_PPC_BOOK3S_64
/*
* Use the top bit of the higher-level page table entries to indicate whether
* the entries we point to contain hugepages. This works because we know that
@@ -260,6 +261,7 @@ extern long long virt_phys_offset;
#else
#define PD_HUGE 0x80000000
#endif
+#endif /* CONFIG_PPC_BOOK3S_64 */
/*
* Some number of bits at the level of the page table that points to
@@ -354,14 +356,27 @@ typedef unsigned long pgprot_t;
typedef struct { signed long pd; } hugepd_t;
#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline int hugepd_ok(hugepd_t hpd)
+{
+ /*
+ * hugepd pointer, bottom two bits == 00 and next 4 bits
+ * indicate size of table
+ */
+ return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+}
+#else
static inline int hugepd_ok(hugepd_t hpd)
{
return (hpd.pd > 0);
}
+#endif
#define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep))))
+int pgd_huge(pgd_t pgd);
#else /* CONFIG_HUGETLB_PAGE */
#define is_hugepd(pdep) 0
+#define pgd_huge(pgd) 0
#endif /* CONFIG_HUGETLB_PAGE */
struct page;
@@ -378,7 +393,11 @@ void arch_free_page(struct page *page, int order);
struct vm_area_struct;
+#ifdef CONFIG_PPC_64K_PAGES
+typedef pte_t *pgtable_t;
+#else
typedef struct page *pgtable_t;
+#endif
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
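Aside (illustrative, not part of the patch): on Book3S 64 a huge-page directory pointer is recognised purely from its low bits, as the new hugepd_ok() above encodes. A hedged sketch of that layout check, assuming HUGEPD_SHIFT_MASK is the shift-field mask defined elsewhere in these headers:

	/* Illustrative only: decode the low bits of a hugepd entry */
	static int looks_like_hugepd(unsigned long pd)
	{
		/* bits 1:0 must be 00 (so it is not a normal table pointer)  */
		/* and the shift field must be non-zero (it encodes the size) */
		return ((pd & 0x3) == 0) && ((pd & HUGEPD_SHIFT_MASK) != 0);
	}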
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index cd915d6..88693ce 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -99,8 +99,7 @@ extern unsigned long slice_get_unmapped_area(unsigned long addr,
unsigned long len,
unsigned long flags,
unsigned int psize,
- int topdown,
- int use_cache);
+ int topdown);
extern unsigned int get_slice_psize(struct mm_struct *mm,
unsigned long addr);
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index 6dc2577..a452968 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -21,9 +21,7 @@ static int parport_pc_find_nonpci_ports (int autoirq, int autodma)
int count = 0;
int virq;
- for (np = NULL; (np = of_find_compatible_node(np,
- "parallel",
- "pnpPNP,400")) != NULL;) {
+ for_each_compatible_node(np, "parallel", "pnpPNP,400") {
prop = of_get_property(np, "reg", &propsize);
if (!prop || propsize > 6*sizeof(u32))
continue;
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index ffbc5fd..2c1d8cb 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -39,11 +39,6 @@ struct pci_controller {
resource_size_t io_base_phys;
resource_size_t pci_io_size;
- /* Some machines (PReP) have a non 1:1 mapping of
- * the PCI memory space in the CPU bus space
- */
- resource_size_t pci_mem_offset;
-
/* Some machines have a special region to forward the ISA
* "memory" cycles such as VGA memory regions. Left to 0
* if unsupported
@@ -86,6 +81,7 @@ struct pci_controller {
*/
struct resource io_resource;
struct resource mem_resources[3];
+ resource_size_t mem_offset[3];
int global_number; /* PCI domain number */
resource_size_t dma_window_base_cur;
@@ -163,6 +159,8 @@ struct pci_dn {
int pci_ext_config_space; /* for pci devices */
+ int force_32bit_msi:1;
+
struct pci_dev *pcidev; /* back-pointer to the pci device */
#ifdef CONFIG_EEH
struct eeh_dev *edev; /* eeh device */
@@ -176,6 +174,8 @@ struct pci_dn {
/* Get the pointer to a device_node's pci_dn */
#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
+extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
+
extern void * update_dn_pci_info(struct device_node *dn, void *data);
static inline int pci_device_from_OF_node(struct device_node *np,
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 9710be3..f265049 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/hw_irq.h>
+#include <linux/device.h>
#define MAX_HWEVENTS 8
#define MAX_EVENT_ALTERNATIVES 8
@@ -32,24 +33,33 @@ struct power_pmu {
unsigned long *valp);
int (*get_alternatives)(u64 event_id, unsigned int flags,
u64 alt[]);
+ u64 (*bhrb_filter_map)(u64 branch_sample_type);
+ void (*config_bhrb)(u64 pmu_bhrb_filter);
void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
int (*limited_pmc_event)(u64 event_id);
u32 flags;
+ const struct attribute_group **attr_groups;
int n_generic;
int *generic_events;
int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
+
+ /* BHRB entries in the PMU */
+ int bhrb_nr;
};
/*
* Values for power_pmu.flags
*/
-#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */
-#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */
-#define PPMU_NO_SIPR 4 /* no SIPR/HV in MMCRA at all */
-#define PPMU_NO_CONT_SAMPLING 8 /* no continuous sampling */
-#define PPMU_SIAR_VALID 16 /* Processor has SIAR Valid bit */
+#define PPMU_LIMITED_PMC5_6 0x00000001 /* PMC5/6 have limited function */
+#define PPMU_ALT_SIPR 0x00000002 /* uses alternate posn for SIPR/HV */
+#define PPMU_NO_SIPR 0x00000004 /* no SIPR/HV in MMCRA at all */
+#define PPMU_NO_CONT_SAMPLING 0x00000008 /* no continuous sampling */
+#define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */
+#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
+#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
+#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */
/*
* Values for flags to get_alternatives()
@@ -63,6 +73,7 @@ extern int register_power_pmu(struct power_pmu *);
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long int read_bhrb(int n);
/*
* Only override the default definitions in include/linux/perf_event.h
@@ -109,3 +120,27 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
* If an event_id is not subject to the constraint expressed by a particular
* field, then it will have 0 in both the mask and value for that field.
*/
+
+extern ssize_t power_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page);
+
+/*
+ * EVENT_VAR() is the same as PMU_EVENT_VAR with a suffix.
+ *
+ * Having a suffix allows us to have aliases in sysfs - eg: the generic
+ * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
+ * 'PM_CYC', where the latter is the name by which the event is known in
+ * the POWER CPU specification.
+ */
+#define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix
+#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr
+
+#define EVENT_ATTR(_name, _id, _suffix) \
+ PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \
+ power_events_sysfs_show)
+
+#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g)
+#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g)
+
+#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p)
+#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p)
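Aside (illustrative, not part of the patch): rewriting the PPMU_* values as explicit hex masks makes it clearer that power_pmu.flags is a bit field; a PMU driver ORs in the capabilities it has and core code tests them with a bitwise AND. A hedged sketch using flags introduced by this patch:

	/* Illustrative only: combining and testing the capability bits */
	struct power_pmu pmu = {
		/* ... */
		.flags = PPMU_HAS_SIER | PPMU_BHRB,
	};

	if (pmu.flags & PPMU_BHRB)
		/* this PMU exposes the branch history rolling buffer */;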
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 580cf73..27b2386 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -37,6 +37,17 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pgtable_page_dtor(ptepage);
+ __free_page(ptepage);
+}
+
static inline void pgtable_free(void *table, unsigned index_size)
{
BUG_ON(index_size); /* 32-bit doesn't use this */
@@ -45,4 +56,38 @@ static inline void pgtable_free(void *table, unsigned index_size)
#define check_pgt_cache() do { } while (0)
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+}
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ pgtable_free(table, shift);
+}
+#endif
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ struct page *page = page_address(table);
+
+ tlb_flush_pgtable(tlb, address);
+ pgtable_page_dtor(page);
+ pgtable_free_tlb(tlb, page, 0);
+}
#endif /* _ASM_POWERPC_PGALLOC_32_H */
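Aside (illustrative, not part of the patch): the SMP variants above stash the table's index size in the low bits of the table pointer before handing it to tlb_remove_table(), which is why MAX_PGTABLE_INDEX_SIZE (0xf) doubles as the decode mask in __tlb_remove_table(). A sketch of the round trip, using the same names as the patch:

	/* Illustrative only: encode/decode of (table pointer, index size) */
	static void encode_decode_example(void *table, unsigned shift)
	{
		unsigned long pgf = (unsigned long)table | shift; /* shift <= MAX_PGTABLE_INDEX_SIZE */

		void *dec_table    = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);
		unsigned dec_shift = pgf & MAX_PGTABLE_INDEX_SIZE;
		/*
		 * dec_table == table and dec_shift == shift, because the
		 * tables passed here are at least 16-byte aligned, so the
		 * low four bits are otherwise zero.
		 */
	}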
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 292725c..b66ae72 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -35,7 +35,10 @@ struct vmemmap_backing {
#define MAX_PGTABLE_INDEX_SIZE 0xf
extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
+#define PGT_CACHE(shift) ({ \
+ BUG_ON(!(shift)); \
+ pgtable_cache[(shift) - 1]; \
+ })
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
@@ -72,8 +75,100 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
#define pmd_pgtable(pmd) pmd_page(pmd)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *page;
+ pte_t *pte;
+
+ pte = pte_alloc_one_kernel(mm, address);
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ pgtable_page_ctor(page);
+ return page;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pgtable_page_dtor(ptepage);
+ __free_page(ptepage);
+}
+
+static inline void pgtable_free(void *table, unsigned index_size)
+{
+ if (!index_size)
+ free_page((unsigned long)table);
+ else {
+ BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(index_size), table);
+ }
+}
+
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+}
+#else /* !CONFIG_SMP */
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ pgtable_free(table, shift);
+}
+#endif /* CONFIG_SMP */
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ struct page *page = page_address(table);
+
+ tlb_flush_pgtable(tlb, address);
+ pgtable_page_dtor(page);
+ pgtable_free_tlb(tlb, page, 0);
+}
+
+#else /* if CONFIG_PPC_64K_PAGES */
+/*
+ * we support 16 fragments per PTE page.
+ */
+#define PTE_FRAG_NR 16
+/*
+ * We use a 2K PTE page fragment and another 2K for storing
+ * real_pte_t hash index
+ */
+#define PTE_FRAG_SIZE_SHIFT 12
+#define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
-#else /* CONFIG_PPC_64K_PAGES */
+extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
+extern void page_table_free(struct mm_struct *, unsigned long *, int);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
+#ifdef CONFIG_SMP
+extern void __tlb_remove_table(void *_table);
+#endif
#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
@@ -83,51 +178,56 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pmd_set(pmd, (unsigned long)pte);
}
-#define pmd_populate(mm, pmd, pte_page) \
- pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte_page)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
- GFP_KERNEL|__GFP_REPEAT);
+ pmd_set(pmd, (unsigned long)pte_page);
}
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
- kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
+ return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ return (pte_t *)page_table_alloc(mm, address, 1);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page;
- pte_t *pte;
+ return (pgtable_t)page_table_alloc(mm, address, 0);
+}
- pte = pte_alloc_one_kernel(mm, address);
- if (!pte)
- return NULL;
- page = virt_to_page(pte);
- pgtable_page_ctor(page);
- return page;
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ page_table_free(mm, (unsigned long *)pte, 1);
}
-static inline void pgtable_free(void *table, unsigned index_size)
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
- if (!index_size)
- free_page((unsigned long)table);
- else {
- BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
- kmem_cache_free(PGT_CACHE(index_size), table);
- }
+ page_table_free(mm, (unsigned long *)ptepage, 0);
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ tlb_flush_pgtable(tlb, address);
+ pgtable_free_tlb(tlb, table, 0);
+}
+#endif /* CONFIG_PPC_64K_PAGES */
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
}
#define __pmd_free_tlb(tlb, pmd, addr) \
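Aside (illustrative, not part of the patch): the 64K-page fragment numbers above are easiest to see as arithmetic. Each PTE page is split into PTE_FRAG_NR = 16 fragments; each fragment holds 2K of pte_t entries plus 2K of real_pte_t hash-index state, so PTE_FRAG_SIZE = 2 * PTRS_PER_PTE * sizeof(pte_t) = 4K (PTE_FRAG_SIZE_SHIFT = 12) and 16 * 4K fills one 64K page. A hedged sketch of the size check, assuming PTRS_PER_PTE and PAGE_SIZE come from the usual headers:

	/* Illustrative only: the fragment geometry implied by the defines above */
	static inline void pte_frag_geometry_check(void)
	{
		BUILD_BUG_ON(PTE_FRAG_SIZE != (1UL << PTE_FRAG_SIZE_SHIFT)); /* 2 * 256 * 8 == 4K  */
		BUILD_BUG_ON(PTE_FRAG_NR * PTE_FRAG_SIZE != PAGE_SIZE);      /* 16 * 4K     == 64K */
	}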
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index bf301ac..e9a9f60 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
#include <linux/mm.h>
+#include <asm-generic/tlb.h>
#ifdef CONFIG_PPC_BOOK3E
extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
@@ -13,56 +14,11 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
}
#endif /* !CONFIG_PPC_BOOK3E */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
-}
-
#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
#include <asm/pgalloc-32.h>
#endif
-#ifdef CONFIG_SMP
-struct mmu_gather;
-extern void tlb_remove_table(struct mmu_gather *, void *);
-
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
- unsigned long pgf = (unsigned long)table;
- BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
- pgf |= shift;
- tlb_remove_table(tlb, (void *)pgf);
-}
-
-static inline void __tlb_remove_table(void *_table)
-{
- void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
- unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
-
- pgtable_free(table, shift);
-}
-#else /* CONFIG_SMP */
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
-{
- pgtable_free(table, shift);
-}
-#endif /* !CONFIG_SMP */
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
- unsigned long address)
-{
- tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(ptepage);
- pgtable_free_tlb(tlb, page_address(ptepage), 0);
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index be4e287..45142d6 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -4,10 +4,10 @@
#include <asm-generic/pgtable-nopud.h>
-#define PTE_INDEX_SIZE 12
-#define PMD_INDEX_SIZE 12
+#define PTE_INDEX_SIZE 8
+#define PMD_INDEX_SIZE 10
#define PUD_INDEX_SIZE 0
-#define PGD_INDEX_SIZE 6
+#define PGD_INDEX_SIZE 12
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 0182c20..e3d55f6f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -167,8 +167,7 @@
* Find an entry in a page-table-directory. We combine the address region
* (the high order N bits) and the pgd portion of the address.
*/
-/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 1f0fe49..ba587df 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -17,6 +17,12 @@ struct mm_struct;
# include <asm/pgtable-ppc32.h>
#endif
+/*
+ * We save the slot number & secondary bit in the second half of the
+ * PTE page. We use the 8 bytes per each pte entry.
+ */
+#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8)
+
#ifndef __ASSEMBLY__
#include <asm/tlbflush.h>
@@ -219,6 +225,8 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr);
+extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
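Aside (illustrative, not part of the patch): PTE_PAGE_HIDX_OFFSET is simply "start of the second half": the slot/secondary-bit words live PTRS_PER_PTE entries past the PTEs, at 8 bytes per entry, which is also what the __real_pte() change further down relies on when it reads *((p) + PTRS_PER_PTE). A hedged sketch of the addressing, assuming sizeof(pte_t) is 8 as the comment states:

	/* Illustrative only: where the hidx word for a given pte lives */
	static unsigned long *hidx_slot(pte_t *ptep)
	{
		/* same PTE page, PTE_PAGE_HIDX_OFFSET (== PTRS_PER_PTE * 8) bytes later */
		return (unsigned long *)((char *)ptep + PTE_PAGE_HIDX_OFFSET);
	}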
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 4324034..187c259 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -128,6 +128,8 @@
#define OP_STHU 45
/* sorted alphabetically */
+#define PPC_INST_BHRBE 0x7c00025c
+#define PPC_INST_CLRBHRB 0x7c00035c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
#define PPC_INST_DCBAL 0x7c2005ec
@@ -146,6 +148,7 @@
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
#define PPC_INST_MSGSND 0x7c00019c
+#define PPC_INST_MSGSNDP 0x7c00011c
#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
@@ -158,6 +161,10 @@
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
#define PPC_INST_SLBFEE 0x7c0007a7
#define PPC_INST_STRING 0x7c00042a
@@ -174,6 +181,9 @@
#define PPC_INST_TLBSRX_DOT 0x7c0006a5
#define PPC_INST_XXLOR 0xf0000510
#define PPC_INST_XVCPSGNDP 0xf0000780
+#define PPC_INST_TRECHKPT 0x7c0007dd
+#define PPC_INST_TRECLAIM 0x7c00075d
+#define PPC_INST_TABORT 0x7c00071d
#define PPC_INST_NAP 0x4c000364
#define PPC_INST_SLEEP 0x4c0003a4
@@ -273,6 +283,8 @@
___PPC_RB(b) | __PPC_EH(eh))
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
___PPC_RB(b))
+#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
+ ___PPC_RB(b))
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
__PPC_RA(a) | __PPC_RS(s))
#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \
@@ -337,4 +349,17 @@
#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
+/* BHRB instructions */
+#define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB)
+#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_INST_BHRBE | \
+ __PPC_RT(r) | \
+ (((n) & 0x3ff) << 11))
+
+/* Transactional memory instructions */
+#define TRECHKPT stringify_in_c(.long PPC_INST_TRECHKPT)
+#define TRECLAIM(r) stringify_in_c(.long PPC_INST_TRECLAIM \
+ | __PPC_RA(r))
+#define TABORT(r) stringify_in_c(.long PPC_INST_TABORT \
+ | __PPC_RA(r))
+
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
diff --git a/arch/powerpc/include/asm/ppc4xx_ocm.h b/arch/powerpc/include/asm/ppc4xx_ocm.h
new file mode 100644
index 0000000..6ce9046
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc4xx_ocm.h
@@ -0,0 +1,45 @@
+/*
+ * PowerPC 4xx OCM memory allocation support
+ *
+ * (C) Copyright 2009, Applied Micro Circuits Corporation
+ * Victor Gallardo (vgallardo@amcc.com)
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_POWERPC_PPC4XX_OCM_H__
+#define __ASM_POWERPC_PPC4XX_OCM_H__
+
+#define PPC4XX_OCM_NON_CACHED 0
+#define PPC4XX_OCM_CACHED 1
+
+#if defined(CONFIG_PPC4xx_OCM)
+
+void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
+ int flags, const char *owner);
+void ppc4xx_ocm_free(const void *virt);
+
+#else
+
+#define ppc4xx_ocm_alloc(phys, size, align, flags, owner) NULL
+#define ppc4xx_ocm_free(addr) ((void)0)
+
+#endif /* CONFIG_PPC4xx_OCM */
+
+#endif /* __ASM_POWERPC_PPC4XX_OCM_H__ */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 3a853ae..8864fcd 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -24,13 +24,12 @@
* user_time and system_time fields in the paca.
*/
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
#define ACCOUNT_CPU_USER_EXIT(ra, rb)
#define ACCOUNT_STOLEN_TIME
#else
#define ACCOUNT_CPU_USER_ENTRY(ra, rb) \
- beq 2f; /* if from kernel mode */ \
MFTB(ra); /* get timebase */ \
ld rb,PACA_STARTTIME_USER(r13); \
std ra,PACA_STARTTIME(r13); \
@@ -38,7 +37,6 @@
ld ra,PACA_USER_TIME(r13); \
add ra,ra,rb; /* add on to user time */ \
std ra,PACA_USER_TIME(r13); \
-2:
#define ACCOUNT_CPU_USER_EXIT(ra, rb) \
MFTB(ra); /* get timebase */ \
@@ -70,7 +68,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_PPC_SPLPAR */
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
* Macros for storing registers into and loading registers from
@@ -125,6 +123,89 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
+/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in
+ * thread_struct:
+ */
+#define SAVE_FPR_TRANSACT(n, base) stfd n,THREAD_TRANSACT_FPR0+ \
+ 8*TS_FPRWIDTH*(n)(base)
+#define SAVE_2FPRS_TRANSACT(n, base) SAVE_FPR_TRANSACT(n, base); \
+ SAVE_FPR_TRANSACT(n+1, base)
+#define SAVE_4FPRS_TRANSACT(n, base) SAVE_2FPRS_TRANSACT(n, base); \
+ SAVE_2FPRS_TRANSACT(n+2, base)
+#define SAVE_8FPRS_TRANSACT(n, base) SAVE_4FPRS_TRANSACT(n, base); \
+ SAVE_4FPRS_TRANSACT(n+4, base)
+#define SAVE_16FPRS_TRANSACT(n, base) SAVE_8FPRS_TRANSACT(n, base); \
+ SAVE_8FPRS_TRANSACT(n+8, base)
+#define SAVE_32FPRS_TRANSACT(n, base) SAVE_16FPRS_TRANSACT(n, base); \
+ SAVE_16FPRS_TRANSACT(n+16, base)
+
+#define REST_FPR_TRANSACT(n, base) lfd n,THREAD_TRANSACT_FPR0+ \
+ 8*TS_FPRWIDTH*(n)(base)
+#define REST_2FPRS_TRANSACT(n, base) REST_FPR_TRANSACT(n, base); \
+ REST_FPR_TRANSACT(n+1, base)
+#define REST_4FPRS_TRANSACT(n, base) REST_2FPRS_TRANSACT(n, base); \
+ REST_2FPRS_TRANSACT(n+2, base)
+#define REST_8FPRS_TRANSACT(n, base) REST_4FPRS_TRANSACT(n, base); \
+ REST_4FPRS_TRANSACT(n+4, base)
+#define REST_16FPRS_TRANSACT(n, base) REST_8FPRS_TRANSACT(n, base); \
+ REST_8FPRS_TRANSACT(n+8, base)
+#define REST_32FPRS_TRANSACT(n, base) REST_16FPRS_TRANSACT(n, base); \
+ REST_16FPRS_TRANSACT(n+16, base)
+
+
+#define SAVE_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \
+ stvx n,b,base
+#define SAVE_2VRS_TRANSACT(n,b,base) SAVE_VR_TRANSACT(n,b,base); \
+ SAVE_VR_TRANSACT(n+1,b,base)
+#define SAVE_4VRS_TRANSACT(n,b,base) SAVE_2VRS_TRANSACT(n,b,base); \
+ SAVE_2VRS_TRANSACT(n+2,b,base)
+#define SAVE_8VRS_TRANSACT(n,b,base) SAVE_4VRS_TRANSACT(n,b,base); \
+ SAVE_4VRS_TRANSACT(n+4,b,base)
+#define SAVE_16VRS_TRANSACT(n,b,base) SAVE_8VRS_TRANSACT(n,b,base); \
+ SAVE_8VRS_TRANSACT(n+8,b,base)
+#define SAVE_32VRS_TRANSACT(n,b,base) SAVE_16VRS_TRANSACT(n,b,base); \
+ SAVE_16VRS_TRANSACT(n+16,b,base)
+
+#define REST_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \
+ lvx n,b,base
+#define REST_2VRS_TRANSACT(n,b,base) REST_VR_TRANSACT(n,b,base); \
+ REST_VR_TRANSACT(n+1,b,base)
+#define REST_4VRS_TRANSACT(n,b,base) REST_2VRS_TRANSACT(n,b,base); \
+ REST_2VRS_TRANSACT(n+2,b,base)
+#define REST_8VRS_TRANSACT(n,b,base) REST_4VRS_TRANSACT(n,b,base); \
+ REST_4VRS_TRANSACT(n+4,b,base)
+#define REST_16VRS_TRANSACT(n,b,base) REST_8VRS_TRANSACT(n,b,base); \
+ REST_8VRS_TRANSACT(n+8,b,base)
+#define REST_32VRS_TRANSACT(n,b,base) REST_16VRS_TRANSACT(n,b,base); \
+ REST_16VRS_TRANSACT(n+16,b,base)
+
+
+#define SAVE_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \
+ STXVD2X(n,R##base,R##b)
+#define SAVE_2VSRS_TRANSACT(n,b,base) SAVE_VSR_TRANSACT(n,b,base); \
+ SAVE_VSR_TRANSACT(n+1,b,base)
+#define SAVE_4VSRS_TRANSACT(n,b,base) SAVE_2VSRS_TRANSACT(n,b,base); \
+ SAVE_2VSRS_TRANSACT(n+2,b,base)
+#define SAVE_8VSRS_TRANSACT(n,b,base) SAVE_4VSRS_TRANSACT(n,b,base); \
+ SAVE_4VSRS_TRANSACT(n+4,b,base)
+#define SAVE_16VSRS_TRANSACT(n,b,base) SAVE_8VSRS_TRANSACT(n,b,base); \
+ SAVE_8VSRS_TRANSACT(n+8,b,base)
+#define SAVE_32VSRS_TRANSACT(n,b,base) SAVE_16VSRS_TRANSACT(n,b,base); \
+ SAVE_16VSRS_TRANSACT(n+16,b,base)
+
+#define REST_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \
+ LXVD2X(n,R##base,R##b)
+#define REST_2VSRS_TRANSACT(n,b,base) REST_VSR_TRANSACT(n,b,base); \
+ REST_VSR_TRANSACT(n+1,b,base)
+#define REST_4VSRS_TRANSACT(n,b,base) REST_2VSRS_TRANSACT(n,b,base); \
+ REST_2VSRS_TRANSACT(n+2,b,base)
+#define REST_8VSRS_TRANSACT(n,b,base) REST_4VSRS_TRANSACT(n,b,base); \
+ REST_4VSRS_TRANSACT(n+4,b,base)
+#define REST_16VSRS_TRANSACT(n,b,base) REST_8VSRS_TRANSACT(n,b,base); \
+ REST_8VSRS_TRANSACT(n+8,b,base)
+#define REST_32VSRS_TRANSACT(n,b,base) REST_16VSRS_TRANSACT(n,b,base); \
+ REST_16VSRS_TRANSACT(n+16,b,base)
+
/* Save the lower 32 VSRs in the thread VSR region */
#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
@@ -391,6 +472,31 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
FTR_SECTION_ELSE_NESTED(848); \
mtocrf (FXM), RS; \
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
+
+/*
+ * PPR restore macros used in entry_64.S
+ * Used for P7 or later processors
+ */
+#define HMT_MEDIUM_LOW_HAS_PPR \
+BEGIN_FTR_SECTION_NESTED(944) \
+ HMT_MEDIUM_LOW; \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,944)
+
+#define SET_DEFAULT_THREAD_PPR(ra, rb) \
+BEGIN_FTR_SECTION_NESTED(945) \
+ lis ra,INIT_PPR@highest; /* default ppr=3 */ \
+ ld rb,PACACURRENT(r13); \
+ sldi ra,ra,32; /* bits 11-13 are used for ppr */ \
+ std ra,TASKTHREADPPR(rb); \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
+
+#define RESTORE_PPR(ra, rb) \
+BEGIN_FTR_SECTION_NESTED(946) \
+ ld ra,PACACURRENT(r13); \
+ ld rb,TASKTHREADPPR(ra); \
+ mtspr SPRN_PPR,rb; /* Restore PPR */ \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
+
#endif
/*
@@ -417,6 +523,17 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define PPC440EP_ERR42
#endif
+/* The following stops all load and store data streams associated with stream
+ * ID (ie. streams created explicitly). The embedded and server mnemonics for
+ * dcbt are different so we use machine "power4" here explicitly.
+ */
+#define DCBT_STOP_ALL_STREAM_IDS(scratch) \
+.machine push ; \
+.machine "power4" ; \
+ lis scratch,0x60000000@h; \
+ dcbt r0,scratch,0b01010; \
+.machine pop
+
/*
* toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
* keep the address intact to be compatible with code shared with
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 8750204..14a6583 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -18,18 +18,29 @@
#define TS_FPRWIDTH 1
#endif
+#ifdef CONFIG_PPC64
+/* Default SMT priority is set to 3. Bits 11-13 are used to save the priority. */
+#define PPR_PRIORITY 3
+#ifdef __ASSEMBLY__
+#define INIT_PPR (PPR_PRIORITY << 50)
+#else
+#define INIT_PPR ((u64)PPR_PRIORITY << 50)
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PPC64 */
+
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <linux/cache.h>
#include <asm/ptrace.h>
#include <asm/types.h>
+#include <asm/hw_breakpoint.h>
/* We do _not_ want to define new machine types at all, those must die
* in favor of using the device-tree
* -- BenH.
*/
-/* PREP sub-platform types see residual.h for these */
+/* PREP sub-platform types. Unused */
#define _PREP_Motorola 0x01 /* motorola prep */
#define _PREP_Firm 0x02 /* firmworks prep */
#define _PREP_IBM 0x00 /* ibm prep */
@@ -45,13 +56,6 @@
extern int _chrp_type;
-#ifdef CONFIG_PPC_PREP
-
-/* what kind of prep workstation we are */
-extern int _prep_type;
-
-#endif /* CONFIG_PPC_PREP */
-
#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
/*
@@ -141,6 +145,7 @@ typedef struct {
#define TS_FPROFFSET 0
#define TS_VSRLOWOFFSET 1
#define TS_FPR(i) fpr[i][TS_FPROFFSET]
+#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
struct thread_struct {
unsigned long ksp; /* Kernel stack pointer */
@@ -215,8 +220,7 @@ struct thread_struct {
struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
- unsigned long dabr; /* Data address breakpoint register */
- unsigned long dabrx; /* ... extension */
+ struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
unsigned long trap_nr; /* last trap # on this thread */
#ifdef CONFIG_ALTIVEC
/* Complete AltiVec register set */
@@ -236,6 +240,34 @@ struct thread_struct {
unsigned long spefscr; /* SPE & eFP status */
int used_spe; /* set if process has used spe */
#endif /* CONFIG_SPE */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u64 tm_tfhar; /* Transaction fail handler addr */
+ u64 tm_texasr; /* Transaction exception & summary */
+ u64 tm_tfiar; /* Transaction fail instr address reg */
+ unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
+ struct pt_regs ckpt_regs; /* Checkpointed registers */
+
+ /*
+ * Transactional FP and VSX 0-31 register set.
+ * NOTE: the sense of these is the opposite of the integer ckpt_regs!
+ *
+ * When a transaction is active/signalled/scheduled etc., *regs is the
+ * most recent set of/speculated GPRs with ckpt_regs being the older
+ * checkpointed regs to which we roll back if transaction aborts.
+ *
+ * However, fpr[] is the checkpointed 'base state' of FP regs, and
+ * transact_fpr[] is the new set of transactional values.
+ * VRs work the same way.
+ */
+ double transact_fpr[32][TS_FPRWIDTH];
+ struct {
+ unsigned int pad;
+ unsigned int val; /* Floating point status */
+ } transact_fpscr;
+ vector128 transact_vr[32] __attribute__((aligned(16)));
+ vector128 transact_vscr __attribute__((aligned(16)));
+ unsigned long transact_vrsave;
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
void* kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
@@ -245,6 +277,19 @@ struct thread_struct {
#ifdef CONFIG_PPC64
unsigned long dscr;
int dscr_inherit;
+ unsigned long ppr; /* used to save/restore SMT priority */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ unsigned long tar;
+ unsigned long ebbrr;
+ unsigned long ebbhr;
+ unsigned long bescr;
+ unsigned long siar;
+ unsigned long sdar;
+ unsigned long sier;
+ unsigned long mmcr0;
+ unsigned long mmcr2;
+ unsigned long mmcra;
#endif
};
@@ -278,6 +323,7 @@ struct thread_struct {
.fpr = {{0}}, \
.fpscr = { .val = 0, }, \
.fpexc_mode = 0, \
+ .ppr = INIT_PPR, \
}
#endif
@@ -363,21 +409,16 @@ static inline void prefetchw(const void *x)
#endif
#ifdef CONFIG_PPC64
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
- unsigned long sp;
-
if (is_32)
- sp = regs->gpr[1] & 0x0ffffffffUL;
- else
- sp = regs->gpr[1];
-
+ return sp & 0x0ffffffffUL;
return sp;
}
#else
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
- return regs->gpr[1];
+ return sp;
}
#endif
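Aside (illustrative, not part of the patch): the "bits 11-13" in the PPR comments are IBM (big-endian) bit numbers for a 64-bit SPR, where bit 0 is the most significant bit. IBM bit 13 therefore sits at conventional shift 63 - 13 = 50, which is exactly where INIT_PPR places PPR_PRIORITY. A worked check with a hypothetical helper macro:

	/* Illustrative only: IBM bit n of a 64-bit register is 1UL << (63 - n) */
	#define PPC_BIT_SKETCH(n)	(1UL << (63 - (n)))

	/*
	 * PPR_PRIORITY = 3 = 0b011, so INIT_PPR = 3UL << 50
	 *             == PPC_BIT_SKETCH(12) | PPC_BIT_SKETCH(13)   (IBM bits 12 and 13)
	 */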
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 99c92d5..bc2da15 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -74,6 +74,75 @@ struct of_drconf_cell {
#define DRCONF_MEM_AI_INVALID 0x00000040
#define DRCONF_MEM_RESERVED 0x00000080
+/*
+ * There are two methods for telling firmware what our capabilities are.
+ * Newer machines have an "ibm,client-architecture-support" method on the
+ * root node. For older machines, we have to call the "process-elf-header"
+ * method in the /packages/elf-loader node, passing it a fake 32-bit
+ * ELF header containing a couple of PT_NOTE sections that contain
+ * structures that contain various information.
+ */
+
+/* New method - extensible architecture description vector. */
+
+/* Option vector bits - generic bits in byte 1 */
+#define OV_IGNORE 0x80 /* ignore this vector */
+#define OV_CESSATION_POLICY 0x40 /* halt if unsupported option present*/
+
+/* Option vector 1: processor architectures supported */
+#define OV1_PPC_2_00 0x80 /* set if we support PowerPC 2.00 */
+#define OV1_PPC_2_01 0x40 /* set if we support PowerPC 2.01 */
+#define OV1_PPC_2_02 0x20 /* set if we support PowerPC 2.02 */
+#define OV1_PPC_2_03 0x10 /* set if we support PowerPC 2.03 */
+#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */
+#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */
+#define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */
+#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */
+
+/* Option vector 2: Open Firmware options supported */
+#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
+
+/* Option vector 3: processor options supported */
+#define OV3_FP 0x80 /* floating point */
+#define OV3_VMX 0x40 /* VMX/Altivec */
+#define OV3_DFP 0x20 /* decimal FP */
+
+/* Option vector 4: IBM PAPR implementation */
+#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */
+
+/* Option vector 5: PAPR/OF options supported
+ * These bits are also used in firmware_has_feature() to validate
+ * the capabilities reported for vector 5 in the device tree so we
+ * encode the vector index in the define and use the OV5_FEAT()
+ * and OV5_INDX() macros to extract the desired information.
+ */
+#define OV5_FEAT(x) ((x) & 0xff)
+#define OV5_INDX(x) ((x) >> 8)
+#define OV5_LPAR 0x0280 /* logical partitioning supported */
+#define OV5_SPLPAR 0x0240 /* shared-processor LPAR supported */
+/* ibm,dynamic-reconfiguration-memory property supported */
+#define OV5_DRCONF_MEMORY 0x0220
+#define OV5_LARGE_PAGES 0x0210 /* large pages supported */
+#define OV5_DONATE_DEDICATE_CPU 0x0202 /* donate dedicated CPU support */
+#define OV5_MSI 0x0201 /* PCIe/MSI support */
+#define OV5_CMO 0x0480 /* Cooperative Memory Overcommitment */
+#define OV5_XCMO 0x0440 /* Page Coalescing */
+#define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */
+#define OV5_PRRN 0x0540 /* Platform Resource Reassignment */
+#define OV5_PFO_HW_RNG 0x0E80 /* PFO Random Number Generator */
+#define OV5_PFO_HW_842 0x0E40 /* PFO Compression Accelerator */
+#define OV5_PFO_HW_ENCR 0x0E20 /* PFO Encryption Accelerator */
+#define OV5_SUB_PROCESSORS 0x0F01 /* 1,2,or 4 Sub-Processors supported */
+
+/* Option Vector 6: IBM PAPR hints */
+#define OV6_LINUX 0x02 /* Linux is our OS */
+
+/*
+ * The architecture vector has an array of PVR mask/value pairs,
+ * followed by # option vectors - 1, followed by the option vectors.
+ */
+extern unsigned char ibm_architecture_vec[];
+
/* These includes are put at the bottom because they may contain things
* that are overridden by this file. Ideally they shouldn't be included
* by this file, but there are a bunch of .c files that currently depend
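Aside (illustrative, not part of the patch): each OV5_* define packs two pieces of information, the option-vector byte index in the high byte and the feature bit in the low byte, which is what OV5_INDX() and OV5_FEAT() pull back apart. A worked example using one of the values listed above:

	/* Illustrative only: decoding OV5_PRRN == 0x0540 */
	/* OV5_INDX(OV5_PRRN) == 0x0540 >> 8   == 5    (byte 5 of option vector 5)      */
	/* OV5_FEAT(OV5_PRRN) == 0x0540 & 0xff == 0x40 (the feature bit within that byte) */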
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index 0e15db4..678a7c1 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -245,7 +245,7 @@ enum lv1_result {
static inline const char* ps3_result(int result)
{
-#if defined(DEBUG)
+#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT)
switch (result) {
case LV1_SUCCESS:
return "LV1_SUCCESS (0)";
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 3e13e23..d836d94 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,7 @@
* generic accessors and iterators here
*/
#define __real_pte(e,p) ((real_pte_t) { \
- (e), ((e) & _PAGE_COMBO) ? \
+ (e), (pte_val(e) & _PAGE_COMBO) ? \
(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 5f99568..becc08e 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -92,7 +92,8 @@ static inline long regs_return_value(struct pt_regs *regs)
} while(0)
struct task_struct;
-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
+extern int ptrace_get_reg(struct task_struct *task, int regno,
+ unsigned long *data);
extern int ptrace_put_reg(struct task_struct *task, int regno,
unsigned long data);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 16fbb66..1042485 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -29,6 +29,10 @@
#define MSR_SF_LG 63 /* Enable 64 bit mode */
#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
#define MSR_HV_LG 60 /* Hypervisor state */
+#define MSR_TS_T_LG 34 /* Trans Mem state: Transactional */
+#define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */
+#define MSR_TS_LG 33 /* Trans Mem state (2 bits) */
+#define MSR_TM_LG 32 /* Trans Mem Available */
#define MSR_VEC_LG 25 /* Enable AltiVec */
#define MSR_VSX_LG 23 /* Enable VSX */
#define MSR_POW_LG 18 /* Enable Power Management */
@@ -98,6 +102,15 @@
#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
+#define MSR_TM __MASK(MSR_TM_LG) /* Transactional Mem Available */
+#define MSR_TS_N 0 /* Non-transactional */
+#define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */
+#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
+#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
+#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
+#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
+
#if defined(CONFIG_PPC_BOOK3S_64)
#define MSR_64BIT MSR_SF
@@ -193,6 +206,10 @@
#define SPRN_UAMOR 0x9d /* User Authority Mask Override Register */
#define SPRN_AMOR 0x15d /* Authority Mask Override Register */
#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
+#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
+#define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
+#define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
+#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
#define CTRL_CT 0xc0000000 /* current thread */
@@ -200,10 +217,12 @@
#define CTRL_CT1 0x40000000 /* thread 1 */
#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
+#define SPRN_DAWR 0xB4
+#define SPRN_DAWRX 0xBC
+#define DAWRX_USER (1UL << 0)
+#define DAWRX_KERNEL (1UL << 1)
+#define DAWRX_HYP (1UL << 2)
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
-#define DABR_TRANSLATION (1UL << 2)
-#define DABR_DATA_WRITE (1UL << 1)
-#define DABR_DATA_READ (1UL << 0)
#define SPRN_DABR2 0x13D /* e300 */
#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
#define DABRX_USER (1UL << 0)
@@ -235,6 +254,20 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
+#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
+#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
+#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
+#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
+#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
+#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */
+#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
+#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/
+#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
+#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */
+#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */
+#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
#define LPCR_VPM1 (1ul << (63-1))
@@ -256,6 +289,7 @@
#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
#define LPCR_MER 0x00000800 /* Mediated External Exception */
+#define LPCR_MER_SH 11
#define LPCR_LPES 0x0000000c
#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
@@ -289,6 +323,7 @@
#define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */
#define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */
#define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */
+#define SPRN_PPR 0x380 /* SMT Thread status Register */
#define SPRN_DEC 0x016 /* Decrement Register */
#define SPRN_DER 0x095 /* Debug Enable Regsiter */
@@ -483,6 +518,7 @@
#ifndef SPRN_PIR
#define SPRN_PIR 0x3FF /* Processor Identification Register */
#endif
+#define SPRN_TIR 0x1BE /* Thread Identification Register */
#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
@@ -595,6 +631,7 @@
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
+#define SPRN_MMCR2 769
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -613,6 +650,13 @@
#define POWER7P_MMCRA_SIAR_VALID 0x10000000 /* P7+ SIAR contents valid */
#define POWER7P_MMCRA_SDAR_VALID 0x08000000 /* P7+ SDAR contents valid */
+#define SPRN_MMCRH 316 /* Hypervisor monitor mode control register */
+#define SPRN_MMCRS 894 /* Supervisor monitor mode control register */
+#define SPRN_MMCRC 851 /* Core monitor mode control register */
+#define SPRN_EBBHR 804 /* Event based branch handler register */
+#define SPRN_EBBRR 805 /* Event based branch return register */
+#define SPRN_BESCR 806 /* Branch event status and control register */
+
#define SPRN_PMC1 787
#define SPRN_PMC2 788
#define SPRN_PMC3 789
@@ -623,6 +667,11 @@
#define SPRN_PMC8 794
#define SPRN_SIAR 780
#define SPRN_SDAR 781
+#define SPRN_SIER 784
+#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
+#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
+#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
+#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
#define SPRN_PA6T_MMCR0 795
#define PA6T_MMCR0_EN0 0x0000000000000001UL
@@ -763,7 +812,7 @@
* HV mode in which case it is HSPRG0
*
* 64-bit server:
- * - SPRG0 unused (reserved for HV on Power4)
+ * - SPRG0 scratch for TM recheckpoint/reclaim (reserved for HV on Power4)
* - SPRG2 scratch for exception vectors
* - SPRG3 CPU and NUMA node for VDSO getcpu (user visible)
* - HSPRG0 stores PACA in HV mode
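Aside (illustrative, not part of the patch): the new transactional-memory MSR bits form a two-bit state field, so the helper macros are plain mask tests: MSR_TM_ACTIVE() is true for either the suspended or the transactional state, while the other two pick out exactly one state each. A hedged sketch of how a caller holding a saved MSR (here via a struct pt_regs *regs) might use them:

	/* Illustrative only: classifying a saved MSR value */
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* a transaction is live: MSR[TS] is S or T */
		if (MSR_TM_SUSPENDED(regs->msr))
			/* suspended transaction */;
		else
			/* executing inside the transaction body */;
	}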
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index aef00c6..34fd704 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -143,6 +143,8 @@ struct rtas_suspend_me_data {
#define RTAS_TYPE_PMGM_TIME_ALARM 0x6f
#define RTAS_TYPE_PMGM_CONFIG_CHANGE 0x70
#define RTAS_TYPE_PMGM_SERVICE_PROC 0x71
+/* Platform Resource Reassignment Notification */
+#define RTAS_TYPE_PRRN 0xA0
/* RTAS check-exception vector offset */
#define RTAS_VECTOR_EXTERNAL_INTERRUPT 0x500
@@ -262,6 +264,8 @@ extern void rtas_progress(char *s, unsigned short hex);
extern void rtas_initialize(void);
extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
+extern int rtas_online_cpus_mask(cpumask_var_t cpus);
+extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
extern int rtas_ibm_suspend_me(struct rtas_args *);
struct rtc_time;
@@ -277,6 +281,10 @@ extern int early_init_dt_scan_rtas(unsigned long node,
extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
+#ifdef CONFIG_PPC_PSERIES
+extern int pseries_devicetree_update(s32 scope);
+#endif
+
#ifdef CONFIG_PPC_RTAS_DAEMON
extern void rtas_cancel_event_scan(void);
#else
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index a0f358d..4ee06fe 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -10,6 +10,9 @@
extern char __end_interrupts[];
+extern char __prom_init_toc_start[];
+extern char __prom_init_toc_end[];
+
static inline int in_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index a101637..9322c28 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -1,6 +1,10 @@
#ifndef _ASM_POWERPC_SIGNAL_H
#define _ASM_POWERPC_SIGNAL_H
+#define __ARCH_HAS_SA_RESTORER
#include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 6edd1b5..82b9649 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -144,6 +144,8 @@ extern void __cpu_die(unsigned int cpu);
/* for UP */
#define hard_smp_processor_id() get_hard_smp_processor_id(0)
#define smp_setup_cpu_maps()
+static inline void inhibit_secondary_onlining(void) {}
+static inline void uninhibit_secondary_onlining(void) {}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 7124fc0..5b23f91 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -96,7 +96,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
+#define SHARED_PROCESSOR (local_paca->lppaca_ptr->shared_proc)
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
index b5308d3..23be8f1 100644
--- a/arch/powerpc/include/asm/syscalls.h
+++ b/arch/powerpc/include/asm/syscalls.h
@@ -5,11 +5,8 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>
-#include <asm/signal.h>
-struct pt_regs;
struct rtas_args;
-struct sigaction;
asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
@@ -17,20 +14,8 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len,
asmlinkage unsigned long sys_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
-asmlinkage long sys_pipe(int __user *fildes);
-asmlinkage long sys_pipe2(int __user *fildes, int flags);
-asmlinkage long sys_rt_sigaction(int sig,
- const struct sigaction __user *act,
- struct sigaction __user *oact, size_t sigsetsize);
asmlinkage long ppc64_personality(unsigned long personality);
asmlinkage int ppc_rtas(struct rtas_args __user *uargs);
-asmlinkage time_t sys64_time(time_t __user * tloc);
-
-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
- size_t sigsetsize);
-asmlinkage long sys_sigaltstack(const stack_t __user *uss,
- stack_t __user *uoss, unsigned long r5, unsigned long r6,
- unsigned long r7, unsigned long r8, struct pt_regs *regs);
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_SYSCALLS_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 97909d3b..43523fe 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -10,8 +10,8 @@ SYSCALL_SPU(read)
SYSCALL_SPU(write)
COMPAT_SYS_SPU(open)
SYSCALL_SPU(close)
-COMPAT_SYS_SPU(waitpid)
-COMPAT_SYS_SPU(creat)
+SYSCALL_SPU(waitpid)
+SYSCALL_SPU(creat)
SYSCALL_SPU(link)
SYSCALL_SPU(unlink)
COMPAT_SYS(execve)
@@ -22,7 +22,7 @@ SYSCALL_SPU(chmod)
SYSCALL_SPU(lchown)
SYSCALL(ni_syscall)
OLDSYS(stat)
-SYSX_SPU(sys_lseek,ppc32_lseek,sys_lseek)
+COMPAT_SYS_SPU(lseek)
SYSCALL_SPU(getpid)
COMPAT_SYS(mount)
SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
@@ -36,13 +36,13 @@ SYSCALL(pause)
COMPAT_SYS(utime)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
-COMPAT_SYS_SPU(access)
-COMPAT_SYS_SPU(nice)
+SYSCALL_SPU(access)
+SYSCALL_SPU(nice)
SYSCALL(ni_syscall)
SYSCALL_SPU(sync)
-COMPAT_SYS_SPU(kill)
+SYSCALL_SPU(kill)
SYSCALL_SPU(rename)
-COMPAT_SYS_SPU(mkdir)
+SYSCALL_SPU(mkdir)
SYSCALL_SPU(rmdir)
SYSCALL_SPU(dup)
SYSCALL_SPU(pipe)
@@ -60,10 +60,10 @@ SYSCALL(ni_syscall)
COMPAT_SYS_SPU(ioctl)
COMPAT_SYS_SPU(fcntl)
SYSCALL(ni_syscall)
-COMPAT_SYS_SPU(setpgid)
+SYSCALL_SPU(setpgid)
SYSCALL(ni_syscall)
SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
-COMPAT_SYS_SPU(umask)
+SYSCALL_SPU(umask)
SYSCALL_SPU(chroot)
COMPAT_SYS(ustat)
SYSCALL_SPU(dup2)
@@ -72,23 +72,24 @@ SYSCALL_SPU(getpgrp)
SYSCALL_SPU(setsid)
SYS32ONLY(sigaction)
SYSCALL_SPU(sgetmask)
-COMPAT_SYS_SPU(ssetmask)
+SYSCALL_SPU(ssetmask)
SYSCALL_SPU(setreuid)
SYSCALL_SPU(setregid)
+#define compat_sys_sigsuspend sys_sigsuspend
SYS32ONLY(sigsuspend)
COMPAT_SYS(sigpending)
-COMPAT_SYS_SPU(sethostname)
+SYSCALL_SPU(sethostname)
COMPAT_SYS_SPU(setrlimit)
COMPAT_SYS(old_getrlimit)
COMPAT_SYS_SPU(getrusage)
COMPAT_SYS_SPU(gettimeofday)
COMPAT_SYS_SPU(settimeofday)
-COMPAT_SYS_SPU(getgroups)
-COMPAT_SYS_SPU(setgroups)
+SYSCALL_SPU(getgroups)
+SYSCALL_SPU(setgroups)
SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
SYSCALL_SPU(symlink)
OLDSYS(lstat)
-COMPAT_SYS_SPU(readlink)
+SYSCALL_SPU(readlink)
SYSCALL(uselib)
SYSCALL(swapon)
SYSCALL(reboot)
@@ -99,14 +100,14 @@ COMPAT_SYS_SPU(truncate)
COMPAT_SYS_SPU(ftruncate)
SYSCALL_SPU(fchmod)
SYSCALL_SPU(fchown)
-COMPAT_SYS_SPU(getpriority)
-COMPAT_SYS_SPU(setpriority)
+SYSCALL_SPU(getpriority)
+SYSCALL_SPU(setpriority)
SYSCALL(ni_syscall)
COMPAT_SYS(statfs)
COMPAT_SYS(fstatfs)
SYSCALL(ni_syscall)
COMPAT_SYS_SPU(socketcall)
-COMPAT_SYS_SPU(syslog)
+SYSCALL_SPU(syslog)
COMPAT_SYS_SPU(setitimer)
COMPAT_SYS_SPU(getitimer)
COMPAT_SYS_SPU(newstat)
@@ -124,7 +125,7 @@ COMPAT_SYS(ipc)
SYSCALL_SPU(fsync)
SYS32ONLY(sigreturn)
PPC_SYS(clone)
-COMPAT_SYS_SPU(setdomainname)
+SYSCALL_SPU(setdomainname)
SYSCALL_SPU(newuname)
SYSCALL(ni_syscall)
COMPAT_SYS_SPU(adjtimex)
@@ -135,10 +136,10 @@ SYSCALL(init_module)
SYSCALL(delete_module)
SYSCALL(ni_syscall)
SYSCALL(quotactl)
-COMPAT_SYS_SPU(getpgid)
+SYSCALL_SPU(getpgid)
SYSCALL_SPU(fchdir)
SYSCALL_SPU(bdflush)
-COMPAT_SYS(sysfs)
+SYSCALL_SPU(sysfs)
SYSX_SPU(ppc64_personality,ppc64_personality,sys_personality)
SYSCALL(ni_syscall)
SYSCALL_SPU(setfsuid)
@@ -150,21 +151,21 @@ SYSCALL_SPU(flock)
SYSCALL_SPU(msync)
COMPAT_SYS_SPU(readv)
COMPAT_SYS_SPU(writev)
-COMPAT_SYS_SPU(getsid)
+SYSCALL_SPU(getsid)
SYSCALL_SPU(fdatasync)
COMPAT_SYS(sysctl)
SYSCALL_SPU(mlock)
SYSCALL_SPU(munlock)
SYSCALL_SPU(mlockall)
SYSCALL_SPU(munlockall)
-COMPAT_SYS_SPU(sched_setparam)
-COMPAT_SYS_SPU(sched_getparam)
-COMPAT_SYS_SPU(sched_setscheduler)
-COMPAT_SYS_SPU(sched_getscheduler)
+SYSCALL_SPU(sched_setparam)
+SYSCALL_SPU(sched_getparam)
+SYSCALL_SPU(sched_setscheduler)
+SYSCALL_SPU(sched_getscheduler)
SYSCALL_SPU(sched_yield)
-COMPAT_SYS_SPU(sched_get_priority_max)
-COMPAT_SYS_SPU(sched_get_priority_min)
-SYSX_SPU(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval_wrapper,sys_sched_rr_get_interval)
+SYSCALL_SPU(sched_get_priority_max)
+SYSCALL_SPU(sched_get_priority_min)
+COMPAT_SYS_SPU(sched_rr_get_interval)
COMPAT_SYS_SPU(nanosleep)
SYSCALL_SPU(mremap)
SYSCALL_SPU(setresuid)
@@ -174,7 +175,7 @@ SYSCALL_SPU(poll)
SYSCALL(ni_syscall)
SYSCALL_SPU(setresgid)
SYSCALL_SPU(getresgid)
-COMPAT_SYS_SPU(prctl)
+SYSCALL_SPU(prctl)
COMPAT_SYS(rt_sigreturn)
COMPAT_SYS(rt_sigaction)
COMPAT_SYS(rt_sigprocmask)
@@ -189,7 +190,7 @@ SYSCALL_SPU(getcwd)
SYSCALL_SPU(capget)
SYSCALL_SPU(capset)
COMPAT_SYS(sigaltstack)
-SYSX_SPU(sys_sendfile,compat_sys_sendfile_wrapper,sys_sendfile)
+COMPAT_SYS_SPU(sendfile)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
PPC_SYS(vfork)
@@ -229,7 +230,7 @@ COMPAT_SYS_SPU(sched_setaffinity)
COMPAT_SYS_SPU(sched_getaffinity)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
-SYSX(sys_ni_syscall,compat_sys_sendfile64_wrapper,sys_sendfile64)
+SYS32ONLY(sendfile64)
COMPAT_SYS_SPU(io_setup)
SYSCALL_SPU(io_destroy)
COMPAT_SYS_SPU(io_getevents)
@@ -238,7 +239,7 @@ SYSCALL_SPU(io_cancel)
SYSCALL(set_tid_address)
SYSX_SPU(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
SYSCALL(exit_group)
-SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
+COMPAT_SYS(lookup_dcookie)
SYSCALL_SPU(epoll_create)
SYSCALL_SPU(epoll_ctl)
SYSCALL_SPU(epoll_wait)
@@ -253,7 +254,7 @@ COMPAT_SYS_SPU(clock_gettime)
COMPAT_SYS_SPU(clock_getres)
COMPAT_SYS_SPU(clock_nanosleep)
SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
-COMPAT_SYS_SPU(tgkill)
+SYSCALL_SPU(tgkill)
COMPAT_SYS_SPU(utimes)
COMPAT_SYS_SPU(statfs64)
COMPAT_SYS_SPU(fstatfs64)
@@ -272,12 +273,12 @@ COMPAT_SYS(mq_timedreceive)
COMPAT_SYS(mq_notify)
COMPAT_SYS(mq_getsetattr)
COMPAT_SYS(kexec_load)
-COMPAT_SYS(add_key)
-COMPAT_SYS(request_key)
+SYSCALL(add_key)
+SYSCALL(request_key)
COMPAT_SYS(keyctl)
COMPAT_SYS(waitid)
-COMPAT_SYS(ioprio_set)
-COMPAT_SYS(ioprio_get)
+SYSCALL(ioprio_set)
+SYSCALL(ioprio_get)
SYSCALL(inotify_init)
SYSCALL(inotify_add_watch)
SYSCALL(inotify_rm_watch)
@@ -357,3 +358,4 @@ SYSCALL_SPU(setns)
COMPAT_SYS(process_vm_readv)
COMPAT_SYS(process_vm_writev)
SYSCALL(finit_module)
+SYSCALL(ni_syscall) /* sys_kcmp */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 406b7b9..ba7b197 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SINGLESTEP 8 /* singlestepping active */
-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
+#define TIF_NOHZ 9 /* in adaptive nohz mode */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
#define TIF_NOERROR 12 /* Force successful syscall return */
@@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
for stack store? */
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -124,8 +125,10 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_UPROBE (1<<TIF_UPROBE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+#define _TIF_NOHZ (1<<TIF_NOHZ)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_NOHZ)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE)
@@ -182,8 +185,6 @@ static inline bool test_thread_local_flags(unsigned int flags)
#define is_32bit_task() (1)
#endif
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
new file mode 100644
index 0000000..9dfbc34
--- /dev/null
+++ b/arch/powerpc/include/asm/tm.h
@@ -0,0 +1,22 @@
+/*
+ * Transactional memory support routines to reclaim and recheckpoint
+ * transactional process state.
+ *
+ * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
+ */
+
+#include <uapi/asm/tm.h>
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+extern void do_load_up_transact_fpu(struct thread_struct *thread);
+extern void do_load_up_transact_altivec(struct thread_struct *thread);
+#endif
+
+extern void tm_enable(void);
+extern void tm_reclaim(struct thread_struct *thread,
+ unsigned long orig_msr, uint8_t cause);
+extern void tm_recheckpoint(struct thread_struct *thread,
+ unsigned long orig_msr);
+extern void tm_abort(uint8_t cause);
+extern void tm_save_sprs(struct thread_struct *thread);
+extern void tm_restore_sprs(struct thread_struct *thread);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 852ed1b..161ab66 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -71,6 +71,7 @@ static inline void sysfs_remove_device_from_node(struct device *dev,
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
extern int start_topology_update(void);
extern int stop_topology_update(void);
+extern int prrn_is_enabled(void);
#else
static inline int start_topology_update(void)
{
@@ -80,6 +81,10 @@ static inline int stop_topology_update(void)
{
return 0;
}
+static inline int prrn_is_enabled(void)
+{
+ return 0;
+}
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
#include <asm-generic/topology.h>
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 5a7510e..dc59091 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -52,6 +52,7 @@ extern void __init udbg_init_40x_realmode(void);
extern void __init udbg_init_cpm(void);
extern void __init udbg_init_usbgecko(void);
extern void __init udbg_init_wsp(void);
+extern void __init udbg_init_memcons(void);
extern void __init udbg_init_ehv_bc(void);
extern void __init udbg_init_ps3gelic(void);
extern void __init udbg_init_debug_opal_raw(void);
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 1d4864a..3ca819f 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 354
+#define __NR_syscalls 355
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
@@ -44,27 +44,17 @@
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#ifdef CONFIG_PPC32
#define __ARCH_WANT_OLD_STAT
#endif
#ifdef CONFIG_PPC64
#define __ARCH_WANT_COMPAT_SYS_TIME
-#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_NEWFSTATAT
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
-#define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
#endif
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
-/*
- * "Conditional" syscalls
- */
-#define cond_syscall(x) \
- asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")))
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index b532060..2301602 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -51,4 +51,5 @@ extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
#endif /* _ASM_UPROBES_H */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 4ae9a09..282d43a 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -150,6 +150,7 @@ extern void xics_register_ics(struct ics *ics);
extern void xics_teardown_cpu(void);
extern void xics_kexec_teardown_cpu(int secondary);
extern void xics_migrate_irqs_away(void);
+extern void icp_native_eoi(struct irq_data *d);
#ifdef CONFIG_SMP
extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
unsigned int strict_check);
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index f7bca63..5182c86 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -40,6 +40,7 @@ header-y += statfs.h
header-y += swab.h
header-y += termbits.h
header-y += termios.h
+header-y += tm.h
header-y += types.h
header-y += ucontext.h
header-y += unistd.h
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index ed9dd81..5b76579 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -1,6 +1,7 @@
#ifndef _UAPI__ASM_POWERPC_CPUTABLE_H
#define _UAPI__ASM_POWERPC_CPUTABLE_H
+/* in AT_HWCAP */
#define PPC_FEATURE_32 0x80000000
#define PPC_FEATURE_64 0x40000000
#define PPC_FEATURE_601_INSTR 0x20000000
@@ -33,4 +34,12 @@
#define PPC_FEATURE_TRUE_LE 0x00000002
#define PPC_FEATURE_PPC_LE 0x00000001
+/* in AT_HWCAP2 */
+#define PPC_FEATURE2_ARCH_2_07 0x80000000
+#define PPC_FEATURE2_HTM 0x40000000
+#define PPC_FEATURE2_DSCR 0x20000000
+#define PPC_FEATURE2_EBB 0x10000000
+#define PPC_FEATURE2_ISEL 0x08000000
+#define PPC_FEATURE2_TAR 0x04000000
+
#endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 9497be2..ec0328a 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -299,6 +299,10 @@ struct kvm_guest_debug_arch {
* Type denotes h/w breakpoint, read watchpoint, write
* watchpoint or watchpoint (both read and write).
*/
+#define KVMPPC_DEBUG_NONE 0x0
+#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ (1UL << 3)
__u32 type;
__u32 reserved;
} bp[16];
@@ -338,6 +342,12 @@ struct kvm_allocate_rma {
__u64 rma_size;
};
+/* for KVM_CAP_PPC_RTAS */
+struct kvm_rtas_token_args {
+ char name[120];
+ __u64 token; /* Use a token of 0 to undefine a mapping */
+};
+
struct kvm_book3e_206_tlb_entry {
__u32 mas8;
__u32 mas1;
@@ -398,6 +408,18 @@ struct kvm_get_htab_header {
__u16 n_invalid;
};
+/* Per-vcpu XICS interrupt controller state */
+#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
+
+#define KVM_REG_PPC_ICP_CPPR_SHIFT 56 /* current proc priority */
+#define KVM_REG_PPC_ICP_CPPR_MASK 0xff
+#define KVM_REG_PPC_ICP_XISR_SHIFT 32 /* interrupt status field */
+#define KVM_REG_PPC_ICP_XISR_MASK 0xffffff
+#define KVM_REG_PPC_ICP_MFRR_SHIFT 24 /* pending IPI priority */
+#define KVM_REG_PPC_ICP_MFRR_MASK 0xff
+#define KVM_REG_PPC_ICP_PPRI_SHIFT 16 /* pending irq priority */
+#define KVM_REG_PPC_ICP_PPRI_MASK 0xff
+
/* Device control API: PPC-specific devices */
#define KVM_DEV_MPIC_GRP_MISC 1
#define KVM_DEV_MPIC_BASE_ADDR 0 /* 64-bit */
@@ -495,4 +517,16 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TLB3PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
#define KVM_REG_PPC_EPTCFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
+/* PPC64 eXternal Interrupt Controller Specification */
+#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
+
+/* Layout of 64-bit source attribute values */
+#define KVM_XICS_DESTINATION_SHIFT 0
+#define KVM_XICS_DESTINATION_MASK 0xffffffffULL
+#define KVM_XICS_PRIORITY_SHIFT 32
+#define KVM_XICS_PRIORITY_MASK 0xff
+#define KVM_XICS_LEVEL_SENSITIVE (1ULL << 40)
+#define KVM_XICS_MASKED (1ULL << 41)
+#define KVM_XICS_PENDING (1ULL << 42)
+
#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/uapi/asm/linkage.h b/arch/powerpc/include/uapi/asm/linkage.h
deleted file mode 100644
index e1c4ac1..0000000
--- a/arch/powerpc/include/uapi/asm/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_POWERPC_LINKAGE_H
-#define _ASM_POWERPC_LINKAGE_H
-
-/* Nothing to see here... */
-
-#endif /* _ASM_POWERPC_LINKAGE_H */
diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h
index ee67a2b..77d2ed3 100644
--- a/arch/powerpc/include/uapi/asm/ptrace.h
+++ b/arch/powerpc/include/uapi/asm/ptrace.h
@@ -108,6 +108,7 @@ struct pt_regs {
#define PT_DAR 41
#define PT_DSISR 42
#define PT_RESULT 43
+#define PT_DSCR 44
#define PT_REGS_COUNT 44
#define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
@@ -146,34 +147,34 @@ struct pt_regs {
* structures. This also simplifies the implementation of a bi-arch
* (combined (32- and 64-bit) gdb.
*/
-#define PTRACE_GETVRREGS 18
-#define PTRACE_SETVRREGS 19
+#define PTRACE_GETVRREGS 0x12
+#define PTRACE_SETVRREGS 0x13
/* Get/set all the upper 32-bits of the SPE registers, accumulator, and
* spefscr, in one go */
-#define PTRACE_GETEVRREGS 20
-#define PTRACE_SETEVRREGS 21
+#define PTRACE_GETEVRREGS 0x14
+#define PTRACE_SETEVRREGS 0x15
/* Get the first 32 128bit VSX registers */
-#define PTRACE_GETVSRREGS 27
-#define PTRACE_SETVSRREGS 28
+#define PTRACE_GETVSRREGS 0x1b
+#define PTRACE_SETVSRREGS 0x1c
/*
* Get or set a debug register. The first 16 are DABR registers and the
* second 16 are IABR registers.
*/
-#define PTRACE_GET_DEBUGREG 25
-#define PTRACE_SET_DEBUGREG 26
+#define PTRACE_GET_DEBUGREG 0x19
+#define PTRACE_SET_DEBUGREG 0x1a
/* (new) PTRACE requests using the same numbers as x86 and the same
* argument ordering. Additionally, they support more registers too
*/
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETFPREGS 14
-#define PTRACE_SETFPREGS 15
-#define PTRACE_GETREGS64 22
-#define PTRACE_SETREGS64 23
+#define PTRACE_GETREGS 0xc
+#define PTRACE_SETREGS 0xd
+#define PTRACE_GETFPREGS 0xe
+#define PTRACE_SETFPREGS 0xf
+#define PTRACE_GETREGS64 0x16
+#define PTRACE_SETREGS64 0x17
/* Calls to trace a 64bit program from a 32bit program */
#define PPC_PTRACE_PEEKTEXT_3264 0x95
@@ -210,6 +211,7 @@ struct ppc_debug_info {
#define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x0000000000000002
#define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x0000000000000004
#define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x0000000000000008
+#define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x0000000000000010
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/uapi/asm/signal.h b/arch/powerpc/include/uapi/asm/signal.h
index e079fb3..6c69ee9 100644
--- a/arch/powerpc/include/uapi/asm/signal.h
+++ b/arch/powerpc/include/uapi/asm/signal.h
@@ -90,6 +90,7 @@ typedef struct {
#include <asm-generic/signal-defs.h>
+#ifndef __KERNEL__
struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
@@ -103,10 +104,7 @@ struct sigaction {
__sigrestore_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
-
-struct k_sigaction {
- struct sigaction sa;
-};
+#endif
typedef struct sigaltstack {
void __user *ss_sp;
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index eb0b186..a36daf3 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -29,7 +29,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_RCVLOWAT 16
#define SO_SNDLOWAT 17
#define SO_RCVTIMEO 18
@@ -77,4 +77,8 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
+#define SO_SELECT_ERR_QUEUE 45
+
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
new file mode 100644
index 0000000..85059a0
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_POWERPC_TM_H
+#define _ASM_POWERPC_TM_H
+
+/* Reason codes describing kernel causes for transaction aborts. By
+ * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
+ * the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor.
+ */
+#define TM_CAUSE_PERSISTENT 0x01
+#define TM_CAUSE_RESCHED 0xde
+#define TM_CAUSE_TLBI 0xdc
+#define TM_CAUSE_FAC_UNAV 0xda
+#define TM_CAUSE_SYSCALL 0xd8 /* future use */
+#define TM_CAUSE_MISC 0xd6 /* future use */
+#define TM_CAUSE_SIGNAL 0xd4
+#define TM_CAUSE_ALIGNMENT 0xd2
+#define TM_CAUSE_EMULATE 0xd0
+
+#endif
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 8c478c6..74cb4d7 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -376,6 +376,7 @@
#define __NR_process_vm_readv 351
#define __NR_process_vm_writev 352
#define __NR_finit_module 353
+#define __NR_kcmp 354
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index e50da21..4abfca3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -75,8 +75,8 @@ endif
obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o
obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_44x) += cpu_setup_44x.o
-obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o
-obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o
+obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o
+obj-$(CONFIG_PPC_DOORBELL) += dbell.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
extra-y := head_$(CONFIG_WORD_SIZE).o
@@ -92,7 +92,6 @@ obj-$(CONFIG_FSL_SOC_BOOKE) += fsl_booke_cache.o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_SMP) += smp.o
@@ -123,6 +122,8 @@ ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
obj-y += iomap.o
endif
+obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o
+
obj-$(CONFIG_PPC64) += $(obj64-y)
obj-$(CONFIG_PPC32) += $(obj32-y)
@@ -143,6 +144,7 @@ GCOV_PROFILE_kprobes.o := n
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-$(CONFIG_ALTIVEC) += vector.o
extra-$(CONFIG_PPC64) += entry_64.o
+extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o
extra-y += systbl_chk.i
$(obj)/systbl.o: systbl_chk
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 006b3ea..1ee1ab9c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -77,6 +77,7 @@ int main(void)
DEFINE(NMI_MASK, NMI_MASK);
DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
+ DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */
@@ -121,6 +122,43 @@ int main(void)
DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ DEFINE(THREAD_TAR, offsetof(struct thread_struct, tar));
+ DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr));
+ DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr));
+ DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr));
+ DEFINE(THREAD_SIAR, offsetof(struct thread_struct, siar));
+ DEFINE(THREAD_SDAR, offsetof(struct thread_struct, sdar));
+ DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier));
+ DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0));
+ DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2));
+ DEFINE(THREAD_MMCRA, offsetof(struct thread_struct, mmcra));
+#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
+ DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
+ DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
+ DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+ DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
+ DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
+ transact_vr[0]));
+ DEFINE(THREAD_TRANSACT_VSCR, offsetof(struct thread_struct,
+ transact_vscr));
+ DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct,
+ transact_vrsave));
+ DEFINE(THREAD_TRANSACT_FPR0, offsetof(struct thread_struct,
+ transact_fpr[0]));
+ DEFINE(THREAD_TRANSACT_FPSCR, offsetof(struct thread_struct,
+ transact_fpscr));
+#ifdef CONFIG_VSX
+ DEFINE(THREAD_TRANSACT_VSR0, offsetof(struct thread_struct,
+ transact_fpr[0]));
+#endif
+ /* Local pt_regs on stack for Transactional Memory funcs. */
+ DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
+ sizeof(struct pt_regs) + 16);
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -453,6 +491,7 @@ int main(void)
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
+ DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
#endif
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
@@ -479,6 +518,7 @@ int main(void)
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
+ DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
@@ -548,6 +588,8 @@ int main(void)
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+ HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
+ HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
HSTATE_FIELD(HSTATE_PMC, host_pmc);
HSTATE_FIELD(HSTATE_PURR, host_purr);
@@ -558,6 +600,10 @@ int main(void)
DEFINE(IPI_PRIORITY, IPI_PRIORITY);
#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+ HSTATE_FIELD(HSTATE_CFAR, cfar);
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
#else /* CONFIG_PPC_BOOK3S */
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 57cf140..18b5b9c 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -48,6 +48,8 @@ _GLOBAL(__restore_cpu_power7)
_GLOBAL(__setup_cpu_power8)
mflr r11
+ bl __init_FSCR
+ bl __init_PMU
bl __init_hvmode_206
mtlr r11
beqlr
@@ -56,21 +58,28 @@ _GLOBAL(__setup_cpu_power8)
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
+ bl __init_HFSCR
bl __init_TLB
+ bl __init_PMU_HV
mtlr r11
blr
_GLOBAL(__restore_cpu_power8)
mflr r11
+ bl __init_FSCR
+ bl __init_PMU
mfmsr r3
rldicl. r0,r3,4,63
+ mtlr r11
beqlr
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
+ bl __init_HFSCR
bl __init_TLB
+ bl __init_PMU_HV
mtlr r11
blr
@@ -112,9 +121,26 @@ __init_LPCR:
isync
blr
+__init_FSCR:
+ mfspr r3,SPRN_FSCR
+ ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
+ mtspr SPRN_FSCR,r3
+ blr
+
+__init_HFSCR:
+ mfspr r3,SPRN_HFSCR
+ ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
+ HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB
+ mtspr SPRN_HFSCR,r3
+ blr
+
__init_TLB:
- /* Clear the TLB */
- li r6,128
+ /*
+ * Clear the TLB using the "IS 3" form of tlbiel instruction
+ * (invalidate by congruence class). P7 has 128 CCs, P8 has 512
+ * so we just always do 512
+ */
+ li r6,512
mtctr r6
li r7,0xc00 /* IS field = 0b11 */
ptesync
@@ -123,3 +149,18 @@ __init_TLB:
bdnz 2b
ptesync
1: blr
+
+__init_PMU_HV:
+ li r5,0
+ mtspr SPRN_MMCRC,r5
+ mtspr SPRN_MMCRH,r5
+ blr
+
+__init_PMU:
+ li r5,0
+ mtspr SPRN_MMCRS,r5
+ mtspr SPRN_MMCRA,r5
+ mtspr SPRN_MMCR0,r5
+ mtspr SPRN_MMCR1,r5
+ mtspr SPRN_MMCR2,r5
+ blr
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 8768be4..538861c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -98,10 +98,14 @@ extern void __restore_cpu_e6500(void);
PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_PSERIES_PERFMON_COMPAT)
+#define COMMON_USER2_POWER7 (PPC_FEATURE2_DSCR)
#define COMMON_USER_POWER8 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\
PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_PSERIES_PERFMON_COMPAT)
+#define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \
+ PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
+ PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR)
#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_HAS_ALTIVEC_COMP)
@@ -277,7 +281,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -428,6 +432,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (architected)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
+ .cpu_user_features2 = COMMON_USER2_POWER7,
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -443,10 +448,11 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER8 (architected)",
.cpu_features = CPU_FTRS_POWER8,
.cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_type = PPC_OPROFILE_INVALID,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
@@ -458,6 +464,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
+ .cpu_user_features2 = COMMON_USER2_POWER7,
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -475,6 +482,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7+ (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
+ .cpu_user_features2 = COMMON_USER2_POWER7,
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -492,13 +500,14 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER8 (raw)",
.cpu_features = CPU_FTRS_POWER8,
.cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.platform = "power8",
@@ -1995,6 +2004,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
.mmu_features = MMU_FTR_TYPE_FSL_E,
.icache_bsize = 32,
.dcache_bsize = 32,
@@ -2015,6 +2025,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP |
PPC_FEATURE_HAS_EFP_DOUBLE_COMP,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
.icache_bsize = 32,
.dcache_bsize = 32,
@@ -2032,6 +2043,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e500mc",
.cpu_features = CPU_FTRS_E500MC,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
MMU_FTR_USE_TLBILX,
.icache_bsize = 64,
@@ -2051,6 +2063,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e5500",
.cpu_features = CPU_FTRS_E5500,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
MMU_FTR_USE_TLBILX,
.icache_bsize = 64,
@@ -2074,6 +2087,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_E6500,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU |
PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
MMU_FTR_USE_TLBILX,
.icache_bsize = 64,
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index b3ba516..9ec3fe1 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -150,10 +150,7 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
continue;
- ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
- init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
- free_page((unsigned long)__va(addr));
- totalram_pages++;
+ free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
}
#endif
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index a892680..d55c76c 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -21,7 +21,7 @@
#ifdef CONFIG_SMP
void doorbell_setup_this_cpu(void)
{
- unsigned long tag = mfspr(SPRN_PIR) & 0x3fff;
+ unsigned long tag = mfspr(SPRN_DOORBELL_CPUTAG) & PPC_DBELL_TAG_MASK;
smp_muxed_ipi_set_data(smp_processor_id(), tag);
}
@@ -30,7 +30,7 @@ void doorbell_cause_ipi(int cpu, unsigned long data)
{
/* Order previous accesses vs. msgsnd, which is treated as a store */
mb();
- ppc_msgsnd(PPC_DBELL, 0, data);
+ ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, data);
}
void doorbell_exception(struct pt_regs *regs)
@@ -41,6 +41,8 @@ void doorbell_exception(struct pt_regs *regs)
may_hard_irq_enable();
+ __get_cpu_var(irq_stat).doorbell_irqs++;
+
smp_ipi_demux();
irq_exit();
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3094981..8041d10 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -439,8 +439,6 @@ ret_from_fork:
ret_from_kernel_thread:
REST_NVGPRS(r1)
bl schedule_tail
- li r3,0
- stw r3,0(r1)
mtlr r14
mr r3,r15
PPC440EP_ERR42
@@ -851,7 +849,7 @@ resume_kernel:
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
CURRENT_THREAD_INFO(r9, r1)
lwz r8,TI_FLAGS(r9)
- andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
+ andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
beq+ 1f
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 1ae4d8d..3843ca4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -33,6 +33,7 @@
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
+#include <asm/context_tracking.h>
/*
* System calls.
@@ -62,8 +63,9 @@ system_call_common:
std r12,_MSR(r1)
std r0,GPR0(r1)
std r10,GPR1(r1)
+ beq 2f /* if from kernel mode */
ACCOUNT_CPU_USER_ENTRY(r10, r11)
- std r2,GPR2(r1)
+2: std r2,GPR2(r1)
std r3,GPR3(r1)
mfcr r2
std r4,GPR4(r1)
@@ -94,7 +96,7 @@ system_call_common:
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r9) /* "regshere" marker */
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
beq 33f
/* if from user, see if there are any DTL entries to process */
@@ -110,7 +112,7 @@ BEGIN_FW_FTR_SECTION
addi r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
/*
* A syscall should always be called with interrupts enabled
@@ -149,7 +151,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
CURRENT_THREAD_INFO(r11, r1)
ld r10,TI_FLAGS(r11)
andi. r11,r10,_TIF_SYSCALL_T_OR_A
- bne- syscall_dotrace
+ bne syscall_dotrace
.Lsyscall_dotrace_cont:
cmpldi 0,r0,NR_syscalls
bge- syscall_enosys
@@ -226,6 +228,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
beq- 1f
ACCOUNT_CPU_USER_EXIT(r11, r12)
+ HMT_MEDIUM_LOW_HAS_PPR
ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
1: ld r2,GPR2(r1)
ld r1,GPR1(r1)
@@ -302,6 +305,7 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything else left to do? */
+ SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq .ret_from_except_lite
@@ -373,8 +377,6 @@ _GLOBAL(ret_from_fork)
_GLOBAL(ret_from_kernel_thread)
bl .schedule_tail
REST_NVGPRS(r1)
- li r3,0
- std r3,0(r1)
ld r14, 0(r14)
mtlr r14
mr r3,r15
@@ -445,6 +447,27 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
std r23,_CCR(r1)
std r1,KSP(r3) /* Set old stack pointer */
+#ifdef CONFIG_PPC_BOOK3S_64
+BEGIN_FTR_SECTION
+ /*
+ * Back up the TAR across context switches. Note that the TAR is not
+ * available for use in the kernel. (To provide this, the TAR should
+ * be backed up/restored on exception entry/exit instead, and be in
+ * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
+ */
+ mfspr r0,SPRN_TAR
+ std r0,THREAD_TAR(r3)
+
+ /* Event based branch registers */
+ mfspr r0, SPRN_BESCR
+ std r0, THREAD_BESCR(r3)
+ mfspr r0, SPRN_EBBHR
+ std r0, THREAD_EBBHR(r3)
+ mfspr r0, SPRN_EBBRR
+ std r0, THREAD_EBBRR(r3)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+#endif
+
#ifdef CONFIG_SMP
/* We need a sync somewhere here to make sure that if the
* previous task gets rescheduled on another CPU, it sees all
@@ -464,6 +487,13 @@ BEGIN_FTR_SECTION
ldarx r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
+#ifdef CONFIG_PPC_BOOK3S
+/* Cancel all explicit user streams as they will have no use after context
+ * switch and will stop the HW from creating streams itself
+ */
+ DCBT_STOP_ALL_STREAM_IDS(r6)
+#endif
+
addi r6,r4,-THREAD /* Convert THREAD to 'current' */
std r6,PACACURRENT(r13) /* Set new 'current' */
@@ -527,6 +557,21 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13)
+#ifdef CONFIG_PPC_BOOK3S_64
+BEGIN_FTR_SECTION
+ /* Event based branch registers */
+ ld r0, THREAD_BESCR(r4)
+ mtspr SPRN_BESCR, r0
+ ld r0, THREAD_EBBHR(r4)
+ mtspr SPRN_EBBHR, r0
+ ld r0, THREAD_EBBRR(r4)
+ mtspr SPRN_EBBRR, r0
+
+ ld r0,THREAD_TAR(r4)
+ mtspr SPRN_TAR,r0
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+#endif
+
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
ld r0,THREAD_VRSAVE(r4)
@@ -595,7 +640,7 @@ _GLOBAL(ret_from_except_lite)
andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
bl .restore_interrupts
- bl .schedule
+ SCHEDULE_USER
b .ret_from_except_lite
1: bl .save_nvgprs
@@ -634,7 +679,7 @@ resume_kernel:
/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11,_TIF_EMULATE_STACK_STORE@h
addi r5,r9,TI_FLAGS
- ldarx r4,0,r5
+0: ldarx r4,0,r5
andc r4,r4,r11
stdcx. r4,0,r5
bne- 0b
@@ -762,6 +807,10 @@ fast_exception_return:
andc r4,r4,r0 /* r0 contains MSR_RI here */
mtmsrd r4,1
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* TM debug */
+ std r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
+#endif
/*
* r13 is our per cpu area, only restore it if we are returning to
* userspace the value stored in the stack frame may belong to
@@ -770,6 +819,7 @@ fast_exception_return:
andi. r0,r3,MSR_PR
beq 1f
ACCOUNT_CPU_USER_EXIT(r2, r4)
+ RESTORE_PPR(r2, r4)
REST_GPR(13, r1)
1:
mtspr SPRN_SRR1,r3
@@ -849,13 +899,22 @@ restore_check_irq_replay:
addi r3,r1,STACK_FRAME_OVERHEAD;
bl .timer_interrupt
b .ret_from_except
+#ifdef CONFIG_PPC_DOORBELL
+1:
#ifdef CONFIG_PPC_BOOK3E
-1: cmpwi cr0,r3,0x280
+ cmpwi cr0,r3,0x280
+#else
+ BEGIN_FTR_SECTION
+ cmpwi cr0,r3,0xe80
+ FTR_SECTION_ELSE
+ cmpwi cr0,r3,0xa00
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+#endif /* CONFIG_PPC_BOOK3E */
bne 1f
addi r3,r1,STACK_FRAME_OVERHEAD;
bl .doorbell_exception
b .ret_from_except
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_DOORBELL */
1: b .ret_from_except /* What else to do here ? */
unrecov_restore:
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 9848713..88a2302 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -23,8 +23,10 @@
#include <asm/code-patching.h>
#include <asm/machdep.h>
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
extern void epapr_ev_idle(void);
extern u32 epapr_ev_idle_start[];
+#endif
bool epapr_paravirt_enabled;
@@ -47,11 +49,15 @@ int __init epapr_paravirt_init(void)
for (i = 0; i < (len / 4); i++) {
patch_instruction(epapr_hypercall_start + i, insts[i]);
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, insts[i]);
+#endif
}
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
if (of_get_property(hyper_node, "has-idle", NULL))
ppc_md.power_save = epapr_ev_idle;
+#endif
epapr_paravirt_enabled = true;
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index a6ef0db..4add11d 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -159,8 +159,9 @@ exc_##n##_common: \
std r9,GPR9(r1); /* save r9 in stackframe */ \
std r10,_NIP(r1); /* save SRR0 to stackframe */ \
std r11,_MSR(r1); /* save SRR1 to stackframe */ \
+ beq 2f; /* if from kernel mode */ \
ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */ \
- ld r3,excf+EX_R10(r13); /* get back r10 */ \
+2: ld r3,excf+EX_R10(r13); /* get back r10 */ \
ld r4,excf+EX_R11(r13); /* get back r11 */ \
mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 */ \
std r12,GPR12(r1); /* save r12 in stackframe */ \
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 4665e82..40e4a17 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
mflr r10 ; \
ld r12,PACAKBASE(r13) ; \
LOAD_HANDLER(r12, system_call_entry_direct) ; \
- mtlr r12 ; \
+ mtctr r12 ; \
mfspr r12,SPRN_SRR1 ; \
/* Re-use of r13... No spare regs to do this */ \
li r13,MSR_RI ; \
mtmsrd r13,1 ; \
GET_PACA(r13) ; /* get r13 back */ \
- blr ;
+ bctr ;
#else
/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT \
@@ -104,7 +104,7 @@ __start_interrupts:
.globl system_reset_pSeries;
system_reset_pSeries:
- HMT_MEDIUM;
+ HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
@@ -153,12 +153,15 @@ machine_check_pSeries_1:
* some code path might still want to branch into the original
* vector
*/
- b machine_check_pSeries
+ HMT_MEDIUM_PPR_DISCARD
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_0(PACA_EXMC)
+ b machine_check_pSeries_0
. = 0x300
.globl data_access_pSeries
data_access_pSeries:
- HMT_MEDIUM
+ HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
b data_access_check_stab
@@ -170,8 +173,9 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
. = 0x380
.globl data_access_slb_pSeries
data_access_slb_pSeries:
- HMT_MEDIUM
+ HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXSLB)
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
@@ -201,8 +205,9 @@ data_access_slb_pSeries:
. = 0x480
.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
- HMT_MEDIUM
+ HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXSLB)
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
@@ -230,6 +235,7 @@ instruction_access_slb_pSeries:
.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
+ HMT_MEDIUM_PPR_DISCARD
BEGIN_FTR_SECTION
_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
EXC_HV, SOFTEN_TEST_HV)
@@ -249,10 +255,14 @@ hardware_interrupt_hv:
STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
- MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
+ . = 0x900
+ .globl decrementer_pSeries
+decrementer_pSeries:
+ _MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
+
STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
- STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
+ MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
@@ -284,16 +294,30 @@ system_call_pSeries:
*/
. = 0xe00
hv_exception_trampoline:
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b h_data_storage_hv
+
. = 0xe20
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b h_instr_storage_hv
+
. = 0xe40
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b emulation_assist_hv
- . = 0xe50
- b hmi_exception_hv
+
. = 0xe60
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b hmi_exception_hv
+ . = 0xe80
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b h_doorbell_hv
+
/* We need to deal with the Altivec unavailable exception
* here which is at 0xf20, thus in the middle of the
* prolog code of the PerformanceMonitor one. A little
@@ -301,16 +325,27 @@ hv_exception_trampoline:
*/
performance_monitor_pSeries_1:
. = 0xf00
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b performance_monitor_pSeries
altivec_unavailable_pSeries_1:
. = 0xf20
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b altivec_unavailable_pSeries
vsx_unavailable_pSeries_1:
. = 0xf40
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b vsx_unavailable_pSeries
+ . = 0xf60
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b tm_unavailable_pSeries
+
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
@@ -322,11 +357,9 @@ vsx_unavailable_pSeries_1:
. = 0x1500
.global denorm_exception_hv
denorm_exception_hv:
- HMT_MEDIUM
+ HMT_MEDIUM_PPR_DISCARD
mtspr SPRN_SPRG_HSCRATCH0,r13
- mfspr r13,SPRN_SPRG_HPACA
- std r9,PACA_EXGEN+EX_R9(r13)
- std r10,PACA_EXGEN+EX_R10(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
std r11,PACA_EXGEN+EX_R11(r13)
std r12,PACA_EXGEN+EX_R12(r13)
mfspr r9,SPRN_SPRG_HSCRATCH0
@@ -367,10 +400,12 @@ denorm_exception_hv:
machine_check_pSeries:
.globl machine_check_fwnmi
machine_check_fwnmi:
- HMT_MEDIUM
+ HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
- EXC_STD, KVMTEST, 0x200)
+ EXCEPTION_PROLOG_0(PACA_EXMC)
+machine_check_pSeries_0:
+ EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
+ EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
/* moved from 0x300 */
@@ -419,38 +454,14 @@ BEGIN_FTR_SECTION
xori r10,r10,(MSR_FE0|MSR_FE1)
mtmsrd r10
sync
- fmr 0,0
- fmr 1,1
- fmr 2,2
- fmr 3,3
- fmr 4,4
- fmr 5,5
- fmr 6,6
- fmr 7,7
- fmr 8,8
- fmr 9,9
- fmr 10,10
- fmr 11,11
- fmr 12,12
- fmr 13,13
- fmr 14,14
- fmr 15,15
- fmr 16,16
- fmr 17,17
- fmr 18,18
- fmr 19,19
- fmr 20,20
- fmr 21,21
- fmr 22,22
- fmr 23,23
- fmr 24,24
- fmr 25,25
- fmr 26,26
- fmr 27,27
- fmr 28,28
- fmr 29,29
- fmr 30,30
- fmr 31,31
+
+#define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
+#define FMR4(n) FMR2(n) ; FMR2(n+2)
+#define FMR8(n) FMR4(n) ; FMR4(n+4)
+#define FMR16(n) FMR8(n) ; FMR8(n+8)
+#define FMR32(n) FMR16(n) ; FMR16(n+16)
+ FMR32(0)
+
FTR_SECTION_ELSE
/*
* To denormalise we need to move a copy of the register to itself.
@@ -460,42 +471,29 @@ FTR_SECTION_ELSE
oris r10,r10,MSR_VSX@h
mtmsrd r10
sync
- XVCPSGNDP(0,0,0)
- XVCPSGNDP(1,1,1)
- XVCPSGNDP(2,2,2)
- XVCPSGNDP(3,3,3)
- XVCPSGNDP(4,4,4)
- XVCPSGNDP(5,5,5)
- XVCPSGNDP(6,6,6)
- XVCPSGNDP(7,7,7)
- XVCPSGNDP(8,8,8)
- XVCPSGNDP(9,9,9)
- XVCPSGNDP(10,10,10)
- XVCPSGNDP(11,11,11)
- XVCPSGNDP(12,12,12)
- XVCPSGNDP(13,13,13)
- XVCPSGNDP(14,14,14)
- XVCPSGNDP(15,15,15)
- XVCPSGNDP(16,16,16)
- XVCPSGNDP(17,17,17)
- XVCPSGNDP(18,18,18)
- XVCPSGNDP(19,19,19)
- XVCPSGNDP(20,20,20)
- XVCPSGNDP(21,21,21)
- XVCPSGNDP(22,22,22)
- XVCPSGNDP(23,23,23)
- XVCPSGNDP(24,24,24)
- XVCPSGNDP(25,25,25)
- XVCPSGNDP(26,26,26)
- XVCPSGNDP(27,27,27)
- XVCPSGNDP(28,28,28)
- XVCPSGNDP(29,29,29)
- XVCPSGNDP(30,30,30)
- XVCPSGNDP(31,31,31)
+
+#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
+#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
+#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
+#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
+#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
+ XVCPSGNDP32(0)
+
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+ b denorm_done
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER8 we need to do that for all 64 VSX registers
+ */
+ XVCPSGNDP32(32)
+denorm_done:
mtspr SPRN_HSRR0,r11
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
+ RESTORE_PPR_PACA(PACA_EXGEN, r10)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
@@ -506,28 +504,34 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
.align 7
/* moved from 0xe00 */
- STD_EXCEPTION_HV(., 0xe02, h_data_storage)
+ STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
- STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
+ STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
- STD_EXCEPTION_HV(., 0xe42, emulation_assist)
+ STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
- STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
+ STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
+ MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
/* moved from 0xf00 */
- STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
+ STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
- STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
+ STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
- STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
+ STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
+ STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
/*
- * An interrupt came in while soft-disabled. We set paca->irq_happened,
- * then, if it was a decrementer interrupt, we bump the dec to max and
- * and return, else we hard disable and return. This is called with
- * r10 containing the value to OR to the paca field.
+ * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
+ * - If it was a decrementer interrupt, we bump the dec to max and return.
+ * - If it was a doorbell we return immediately since doorbells are edge
+ * triggered and won't automatically refire.
+ * - else we hard disable and return.
+ * This is called with r10 containing the value to OR to the paca field.
*/
#define MASKED_INTERRUPT(_H) \
masked_##_H##interrupt: \
@@ -535,13 +539,15 @@ masked_##_H##interrupt: \
lbz r11,PACAIRQHAPPENED(r13); \
or r11,r11,r10; \
stb r11,PACAIRQHAPPENED(r13); \
- andi. r10,r10,PACA_IRQ_DEC; \
- beq 1f; \
+ cmpwi r10,PACA_IRQ_DEC; \
+ bne 1f; \
lis r10,0x7fff; \
ori r10,r10,0xffff; \
mtspr SPRN_DEC,r10; \
b 2f; \
-1: mfspr r10,SPRN_##_H##SRR1; \
+1: cmpwi r10,PACA_IRQ_DBELL; \
+ beq 2f; \
+ mfspr r10,SPRN_##_H##SRR1; \
rldicl r10,r10,48,1; /* clear MSR_EE */ \
rotldi r10,r10,16; \
mtspr SPRN_##_H##SRR1,r10; \
@@ -558,8 +564,8 @@ masked_##_H##interrupt: \
/*
* Called from arch_local_irq_enable when an interrupt needs
- * to be resent. r3 contains 0x500 or 0x900 to indicate which
- * kind of interrupt. MSR:EE is already off. We generate a
+ * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
+ * which kind of interrupt. MSR:EE is already off. We generate a
* stackframe like if a real interrupt had happened.
*
* Note: While MSR:EE is off, we need to make sure that _MSR
@@ -575,9 +581,18 @@ _GLOBAL(__replay_interrupt)
mflr r11
mfcr r9
ori r12,r12,MSR_EE
- andi. r3,r3,0x0800
- bne decrementer_common
- b hardware_interrupt_common
+ cmpwi r3,0x900
+ beq decrementer_common
+ cmpwi r3,0x500
+ beq hardware_interrupt_common
+BEGIN_FTR_SECTION
+ cmpwi r3,0xe80
+ beq h_doorbell_common
+FTR_SECTION_ELSE
+ cmpwi r3,0xa00
+ beq doorbell_super_common
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+ blr
#ifdef CONFIG_PPC_PSERIES
/*
@@ -586,7 +601,7 @@ _GLOBAL(__replay_interrupt)
.globl system_reset_fwnmi
.align 7
system_reset_fwnmi:
- HMT_MEDIUM
+ HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13) /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
NOTEST, 0x100)
@@ -640,9 +655,18 @@ slb_miss_user_pseries:
.align 7
.globl machine_check_common
machine_check_common:
+
+ mfspr r10,SPRN_DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_DSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
FINISH_NAP
DISABLE_INTS
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ lwz r4,PACA_EXGEN+EX_DSISR(r13)
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl .machine_check_exception
@@ -651,12 +675,21 @@ machine_check_common:
STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
- STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
+#ifdef CONFIG_PPC_DOORBELL
+ STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
+#else
+ STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
+#endif
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
- STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
+ STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
+#ifdef CONFIG_PPC_DOORBELL
+ STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
+#else
+ STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
+#endif
STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
@@ -690,8 +723,8 @@ machine_check_common:
. = 0x4380
.globl data_access_slb_relon_pSeries
data_access_slb_relon_pSeries:
- HMT_MEDIUM
SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXSLB)
EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
@@ -715,8 +748,8 @@ data_access_slb_relon_pSeries:
. = 0x4480
.globl instruction_access_slb_relon_pSeries
instruction_access_slb_relon_pSeries:
- HMT_MEDIUM
SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXSLB)
EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
@@ -740,12 +773,13 @@ hardware_interrupt_relon_hv:
_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
FTR_SECTION_ELSE
_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
+ MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
. = 0x4c00
@@ -759,56 +793,60 @@ system_call_relon_pSeries:
STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
. = 0x4e00
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b h_data_storage_relon_hv
. = 0x4e20
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b h_instr_storage_relon_hv
. = 0x4e40
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b emulation_assist_relon_hv
- . = 0x4e50
- b hmi_exception_relon_hv
-
. = 0x4e60
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b hmi_exception_relon_hv
- /* For when we support the doorbell interrupt:
- STD_RELON_EXCEPTION_HYPERVISOR(0x4e80, 0xe80, doorbell_hyper)
- */
+ . = 0x4e80
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b h_doorbell_relon_hv
performance_monitor_relon_pSeries_1:
. = 0x4f00
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b performance_monitor_relon_pSeries
altivec_unavailable_relon_pSeries_1:
. = 0x4f20
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b altivec_unavailable_relon_pSeries
vsx_unavailable_relon_pSeries_1:
. = 0x4f40
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
b vsx_unavailable_relon_pSeries
-#ifdef CONFIG_CBE_RAS
- STD_RELON_EXCEPTION_HV(0x5200, 0x1202, cbe_system_error)
-#endif /* CONFIG_CBE_RAS */
+tm_unavailable_relon_pSeries_1:
+ . = 0x4f60
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b tm_unavailable_relon_pSeries
+
STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
. = 0x5500
b denorm_exception_hv
#endif
-#ifdef CONFIG_CBE_RAS
- STD_RELON_EXCEPTION_HV(0x5600, 0x1602, cbe_maintenance)
-#else
-#ifdef CONFIG_HVC_SCOM
- STD_RELON_EXCEPTION_HV(0x5600, 0x1600, maintence_interrupt)
- KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1600)
-#endif /* CONFIG_HVC_SCOM */
-#endif /* CONFIG_CBE_RAS */
STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
-#ifdef CONFIG_CBE_RAS
- STD_RELON_EXCEPTION_HV(0x5800, 0x1802, cbe_thermal)
-#endif /* CONFIG_CBE_RAS */
/* Other future vectors */
.align 7
@@ -1000,77 +1038,6 @@ unrecov_user_slb:
#endif /* __DISABLED__ */
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(slb_miss_realmode)
- mflr r10
-#ifdef CONFIG_RELOCATABLE
- mtctr r11
-#endif
-
- stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
- std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
-
- bl .slb_allocate_realmode
-
- /* All done -- return from exception. */
-
- ld r10,PACA_EXSLB+EX_LR(r13)
- ld r3,PACA_EXSLB+EX_R3(r13)
- lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
-
- mtlr r10
-
- andi. r10,r12,MSR_RI /* check for unrecoverable exception */
- beq- 2f
-
-.machine push
-.machine "power4"
- mtcrf 0x80,r9
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
-.machine pop
-
- ld r9,PACA_EXSLB+EX_R9(r13)
- ld r10,PACA_EXSLB+EX_R10(r13)
- ld r11,PACA_EXSLB+EX_R11(r13)
- ld r12,PACA_EXSLB+EX_R12(r13)
- ld r13,PACA_EXSLB+EX_R13(r13)
- rfid
- b . /* prevent speculative execution */
-
-2: mfspr r11,SPRN_SRR0
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10,unrecov_slb)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- mtspr SPRN_SRR1,r10
- rfid
- b .
-
-unrecov_slb:
- EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
- DISABLE_INTS
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
-
-#ifdef CONFIG_PPC_970_NAP
-power4_fixup_nap:
- andc r9,r9,r10
- std r9,TI_LOCAL_FLAGS(r11)
- ld r10,_LINK(r1) /* make idle task do the */
- std r10,_NIP(r1) /* equivalent of a blr */
- blr
-#endif
-
.align 7
.globl alignment_common
alignment_common:
@@ -1109,9 +1076,26 @@ fp_unavailable_common:
addi r3,r1,STACK_FRAME_OVERHEAD
bl .kernel_fp_unavailable_exception
BUG_OPCODE
-1: bl .load_up_fpu
+1:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
+ * transaction), go do TM stuff
+ */
+ rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
+ bne- 2f
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+ bl .load_up_fpu
b fast_exception_return
-
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+2: /* User process was in a transaction */
+ bl .save_nvgprs
+ DISABLE_INTS
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .fp_unavailable_tm
+ b .ret_from_except
+#endif
.align 7
.globl altivec_unavailable_common
altivec_unavailable_common:
@@ -1119,8 +1103,25 @@ altivec_unavailable_common:
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
beq 1f
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION_NESTED(69)
+ /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
+ * transaction), go do TM stuff
+ */
+ rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
+ bne- 2f
+ END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
+#endif
bl .load_up_altivec
b fast_exception_return
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+2: /* User process was in a transaction */
+ bl .save_nvgprs
+ DISABLE_INTS
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .altivec_unavailable_tm
+ b .ret_from_except
+#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
@@ -1137,7 +1138,24 @@ vsx_unavailable_common:
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
beq 1f
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION_NESTED(69)
+ /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
+ * transaction), go do TM stuff
+ */
+ rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
+ bne- 2f
+ END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
+#endif
b .load_up_vsx
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+2: /* User process was in a transaction */
+ bl .save_nvgprs
+ DISABLE_INTS
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .vsx_unavailable_tm
+ b .ret_from_except
+#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
@@ -1148,9 +1166,147 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
b .ret_from_except
.align 7
+ .globl tm_unavailable_common
+tm_unavailable_common:
+ EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
+ bl .save_nvgprs
+ DISABLE_INTS
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .tm_unavailable_exception
+ b .ret_from_except
+
+ .align 7
.globl __end_handlers
__end_handlers:
+ /* Equivalents to the above handlers for relocation-on interrupt vectors */
+ STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
+ STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
+ STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
+ STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
+ MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
+
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
+ STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+
+#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ */
+ .= 0x7000
+ .globl fwnmi_data_area
+fwnmi_data_area:
+
+ /* pseries and powernv need to keep the whole page from
+ * 0x7000 to 0x8000 free for use by the firmware
+ */
+ . = 0x8000
+#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
+
+/* Space for CPU0's segment table */
+ .balign 4096
+ .globl initial_stab
+initial_stab:
+ .space 4096
+
+#ifdef CONFIG_PPC_POWERNV
+_GLOBAL(opal_mc_secondary_handler)
+ HMT_MEDIUM_PPR_DISCARD
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
+ clrldi r3,r3,2
+ tovirt(r3,r3)
+ std r3,PACA_OPAL_MC_EVT(r13)
+ ld r13,OPAL_MC_SRR0(r3)
+ mtspr SPRN_SRR0,r13
+ ld r13,OPAL_MC_SRR1(r3)
+ mtspr SPRN_SRR1,r13
+ ld r3,OPAL_MC_GPR3(r3)
+ GET_SCRATCH0(r13)
+ b machine_check_pSeries
+#endif /* CONFIG_PPC_POWERNV */
+
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contains the saved SRR1; SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+ mflr r10
+#ifdef CONFIG_RELOCATABLE
+ mtctr r11
+#endif
+
+ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
+ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
+
+ bl .slb_allocate_realmode
+
+ /* All done -- return from exception. */
+
+ ld r10,PACA_EXSLB+EX_LR(r13)
+ ld r3,PACA_EXSLB+EX_R3(r13)
+ lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
+
+ mtlr r10
+
+ andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+ beq- 2f
+
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
+.machine pop
+
+ RESTORE_PPR_PACA(PACA_EXSLB, r9)
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ ld r11,PACA_EXSLB+EX_R11(r13)
+ ld r12,PACA_EXSLB+EX_R12(r13)
+ ld r13,PACA_EXSLB+EX_R13(r13)
+ rfid
+ b . /* prevent speculative execution */
+
+2: mfspr r11,SPRN_SRR0
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10,unrecov_slb)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
+ mtspr SPRN_SRR1,r10
+ rfid
+ b .
+
+unrecov_slb:
+ EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+ DISABLE_INTS
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
+
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+ andc r9,r9,r10
+ std r9,TI_LOCAL_FLAGS(r11)
+ ld r10,_LINK(r1) /* make idle task do the */
+ std r10,_NIP(r1) /* equivalent of a blr */
+ blr
+#endif
+
/*
* Hash table stuff
*/
@@ -1222,7 +1378,7 @@ handle_dabr_fault:
ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_dabr
+ bl .do_break
12: b .ret_from_except_lite
@@ -1268,20 +1424,36 @@ do_ste_alloc:
_GLOBAL(do_stab_bolted)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
+ mfspr r11,SPRN_DAR /* ea */
+
+ /*
+ * check for bad kernel/user address
+ * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+ */
+ rldicr. r9,r11,4,(63 - 46 - 4)
+ li r9,0 /* VSID = 0 for bad address */
+ bne- 0f
+ /*
+ * Calculate VSID:
+	 * This is a kernel VSID; we take the context from the top of
+	 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * Here we know that (ea >> 60) == 0xc
+ */
+ lis r9,(MAX_USER_CONTEXT + 1)@ha
+ addi r9,r9,(MAX_USER_CONTEXT + 1)@l
+
+ srdi r10,r11,SID_SHIFT
+ rldimi r10,r9,ESID_BITS,0 /* proto vsid */
+ ASM_VSID_SCRAMBLE(r10, r9, 256M)
+ rldic r9,r10,12,16 /* r9 = vsid << 12 */
+
+0:
/* Hash to the primary group */
ld r10,PACASTABVIRT(r13)
- mfspr r11,SPRN_DAR
- srdi r11,r11,28
+ srdi r11,r11,SID_SHIFT
rldimi r10,r11,7,52 /* r10 = first ste of the group */
- /* Calculate VSID */
- /* This is a kernel address, so protovsid = ESID | 1 << 37 */
- li r9,0x1
- rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
- ASM_VSID_SCRAMBLE(r11, r9, 256M)
- rldic r9,r11,12,16 /* r9 = vsid << 12 */
-
/* Search the primary group for a free entry */
1: ld r11,0(r10) /* Test valid bit of the current ste */
andi. r11,r11,0x80
@@ -1344,56 +1516,3 @@ _GLOBAL(do_stab_bolted)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
b . /* prevent speculative execution */
-
-
- /* Equivalents to the above handlers for relocation-on interrupt vectors */
- STD_RELON_EXCEPTION_HV(., 0xe00, h_data_storage)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
- STD_RELON_EXCEPTION_HV(., 0xe20, h_instr_storage)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
- STD_RELON_EXCEPTION_HV(., 0xe40, emulation_assist)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
- STD_RELON_EXCEPTION_HV(., 0xe60, hmi_exception)
- KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
-
- STD_RELON_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
- STD_RELON_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
- STD_RELON_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
-
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
-/*
- * Data area reserved for FWNMI option.
- * This address (0x7000) is fixed by the RPA.
- */
- .= 0x7000
- .globl fwnmi_data_area
-fwnmi_data_area:
-
- /* pseries and powernv need to keep the whole page from
- * 0x7000 to 0x8000 free for use by the firmware
- */
- . = 0x8000
-#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
-
-/* Space for CPU0's segment table */
- .balign 4096
- .globl initial_stab
-initial_stab:
- .space 4096
-
-#ifdef CONFIG_PPC_POWERNV
-_GLOBAL(opal_mc_secondary_handler)
- HMT_MEDIUM
- SET_SCRATCH0(r13)
- GET_PACA(r13)
- clrldi r3,r3,2
- tovirt(r3,r3)
- std r3,PACA_OPAL_MC_EVT(r13)
- ld r13,OPAL_MC_SRR0(r3)
- mtspr SPRN_SRR0,r13
- ld r13,OPAL_MC_SRR1(r3)
- mtspr SPRN_SRR1,r13
- ld r3,OPAL_MC_GPR3(r3)
- GET_SCRATCH0(r13)
- b machine_check_pSeries
-#endif /* CONFIG_PPC_POWERNV */
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 06c8202..2230fd0 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1045,10 +1045,7 @@ static void fadump_release_memory(unsigned long begin, unsigned long end)
if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start))
continue;
- ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
- init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
- free_page((unsigned long)__va(addr));
- totalram_pages++;
+ free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
}
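The fadump hunk above collapses the open-coded reserved-page release into the generic free_reserved_page() helper. As a rough sketch (not the exact mm implementation; free_reserved_page_sketch is a made-up name purely for illustration), the helper boils down to the same four steps the removed lines performed:

	#include <linux/mm.h>

	/* Illustrative only: what releasing one firmware-reserved page amounts to. */
	static inline void free_reserved_page_sketch(struct page *page)
	{
		ClearPageReserved(page);	/* page is no longer marked reserved */
		init_page_count(page);		/* reset refcount so it can be freed */
		__free_page(page);		/* hand it back to the buddy allocator */
		totalram_pages++;		/* account it as usable RAM */
	}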
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index e0ada05..caeaabf 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -35,6 +35,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: REST_32VSRS(n,c,base); \
3:
+#define __REST_32FPVSRS_TRANSACT(n,c,base) \
+BEGIN_FTR_SECTION \
+ b 2f; \
+END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
+ REST_32FPRS_TRANSACT(n,base); \
+ b 3f; \
+2: REST_32VSRS_TRANSACT(n,c,base); \
+3:
+
#define __SAVE_32FPVSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
@@ -45,11 +54,68 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
3:
#else
#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
+#define __REST_32FPVSRS_TRANSACT(n,b,base) REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
#endif
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
+#define REST_32FPVSRS_TRANSACT(n,c,base) \
+ __REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Wrapper to call load_up_fpu from C.
+ * void do_load_up_fpu(struct pt_regs *regs);
+ */
+_GLOBAL(do_load_up_fpu)
+ mflr r0
+ std r0, 16(r1)
+ stdu r1, -112(r1)
+
+ subi r6, r3, STACK_FRAME_OVERHEAD
+ /* load_up_fpu expects r12=MSR, r13=PACA, and returns
+ * with r12 = new MSR.
+ */
+ ld r12,_MSR(r6)
+ GET_PACA(r13)
+
+ bl load_up_fpu
+ std r12,_MSR(r6)
+
+ ld r0, 112+16(r1)
+ addi r1, r1, 112
+ mtlr r0
+ blr
+
+
+/* void do_load_up_transact_fpu(struct thread_struct *thread)
+ *
+ * This is similar to load_up_fpu but for the transactional version of the FP
+ * register set. It doesn't mess with the task MSR or valid flags.
+ * Furthermore, we don't do lazy FP with TM currently.
+ */
+_GLOBAL(do_load_up_transact_fpu)
+ mfmsr r6
+ ori r5,r6,MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ oris r5,r5,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ SYNC
+ MTMSRD(r5)
+
+ lfd fr0,THREAD_TRANSACT_FPSCR(r3)
+ MTFSF_L(fr0)
+ REST_32FPVSRS_TRANSACT(0, R4, R3)
+
+ /* FP/VSX off again */
+ MTMSRD(r6)
+ SYNC
+
+ blr
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
/*
* This task wants to use the FPU now.
* On UP, disable FP for the task which had the FPU previously,
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 4989661..8a9b6f5 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -430,30 +430,18 @@ label:
EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
/* 0x1000 - Programmable Interval Timer (PIT) Exception */
- START_EXCEPTION(0x1000, Decrementer)
- NORMAL_EXCEPTION_PROLOG
- lis r0,TSR_PIS@h
- mtspr SPRN_TSR,r0 /* Clear the PIT exception */
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_LITE(0x1000, timer_interrupt)
-
-#if 0
-/* NOTE:
- * FIT and WDT handlers are not implemented yet.
- */
+ . = 0x1000
+ b Decrementer
/* 0x1010 - Fixed Interval Timer (FIT) Exception
*/
- STND_EXCEPTION(0x1010, FITException, unknown_exception)
+ . = 0x1010
+ b FITException
/* 0x1020 - Watchdog Timer (WDT) Exception
*/
-#ifdef CONFIG_BOOKE_WDT
- CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
-#else
- CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
-#endif
-#endif
+ . = 0x1020
+ b WDTException
/* 0x1100 - Data TLB Miss Exception
* As the name implies, translation is not in the MMU, so search the
@@ -738,6 +726,29 @@ label:
(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
+ /* Programmable Interval Timer (PIT) Exception. (from 0x1000) */
+Decrementer:
+ NORMAL_EXCEPTION_PROLOG
+ lis r0,TSR_PIS@h
+ mtspr SPRN_TSR,r0 /* Clear the PIT exception */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ EXC_XFER_LITE(0x1000, timer_interrupt)
+
+ /* Fixed Interval Timer (FIT) Exception. (from 0x1010) */
+FITException:
+ NORMAL_EXCEPTION_PROLOG
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ EXC_XFER_EE(0x1010, unknown_exception)
+
+ /* Watchdog Timer (WDT) Exception. (from 0x1020) */
+WDTException:
+ CRITICAL_EXCEPTION_PROLOG;
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ EXC_XFER_TEMPLATE(WatchdogException, 0x1020+2,
+ (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)),
+ NOCOPY, crit_transfer_to_handler,
+ ret_from_crit_exc)
+
/*
* The other Data TLB exceptions bail out to this point
* if they can't resolve the lightweight TLB fault.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 9342e97..843f71a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -122,6 +122,8 @@ __secondary_hold:
#endif
/* Grab our physical cpu number */
mr r24,r3
+ /* stash r4 for book3e */
+ mr r25,r4
/* Tell the master cpu we're here */
/* Relocation is off & we are located at an address less */
@@ -129,16 +131,31 @@ __secondary_hold:
std r24,__secondary_hold_acknowledge-_stext(0)
sync
+ li r26,0
+#ifdef CONFIG_PPC_BOOK3E
+ tovirt(r26,r26)
+#endif
/* All secondary cpus wait here until told to start. */
-100: ld r4,__secondary_hold_spinloop-_stext(0)
+100: ld r4,__secondary_hold_spinloop-_stext(r26)
cmpdi 0,r4,0
beq 100b
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+#ifdef CONFIG_PPC_BOOK3E
+ tovirt(r4,r4)
+#endif
ld r4,0(r4) /* deref function descriptor */
mtctr r4
mr r3,r24
+ /*
+	 * other platforms may already have r4 set correctly to begin
+	 * with; this gives us some safety in case they do not
+ */
+#ifdef CONFIG_PPC_BOOK3E
+ mr r4,r25
+#else
li r4,0
+#endif
/* Make sure that patched code is visible */
isync
bctr
@@ -507,6 +524,7 @@ _GLOBAL(copy_and_flush)
sync
addi r5,r5,8
addi r6,r6,8
+ isync
blr
.align 8
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index df564e9..a620203 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -199,11 +199,6 @@
.align 5; \
label:
-#define FINISH_EXCEPTION(func) \
- bl transfer_to_handler_full; \
- .long func; \
- .long ret_from_except_full
-
#define EXCEPTION(n, intno, label, hdlr, xfer) \
START_EXCEPTION(label); \
NORMAL_EXCEPTION_PROLOG(intno); \
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index a89cae4..a949bdf 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -73,7 +73,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
* If so, DABR will be populated in single_step_dabr_instruction().
*/
if (current->thread.last_hit_ubp != bp)
- set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
+ set_breakpoint(info);
return 0;
}
@@ -97,7 +97,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
}
*slot = NULL;
- set_dabr(0, 0);
+ hw_breakpoint_disable();
}
/*
@@ -127,19 +127,13 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
int arch_bp_generic_fields(int type, int *gen_bp_type)
{
- switch (type) {
- case DABR_DATA_READ:
- *gen_bp_type = HW_BREAKPOINT_R;
- break;
- case DABR_DATA_WRITE:
- *gen_bp_type = HW_BREAKPOINT_W;
- break;
- case (DABR_DATA_WRITE | DABR_DATA_READ):
- *gen_bp_type = (HW_BREAKPOINT_W | HW_BREAKPOINT_R);
- break;
- default:
+ *gen_bp_type = 0;
+ if (type & HW_BRK_TYPE_READ)
+ *gen_bp_type |= HW_BREAKPOINT_R;
+ if (type & HW_BRK_TYPE_WRITE)
+ *gen_bp_type |= HW_BREAKPOINT_W;
+ if (*gen_bp_type == 0)
return -EINVAL;
- }
return 0;
}
@@ -148,35 +142,28 @@ int arch_bp_generic_fields(int type, int *gen_bp_type)
*/
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
- int ret = -EINVAL;
+ int ret = -EINVAL, length_max;
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
if (!bp)
return ret;
- switch (bp->attr.bp_type) {
- case HW_BREAKPOINT_R:
- info->type = DABR_DATA_READ;
- break;
- case HW_BREAKPOINT_W:
- info->type = DABR_DATA_WRITE;
- break;
- case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
- info->type = (DABR_DATA_READ | DABR_DATA_WRITE);
- break;
- default:
+ info->type = HW_BRK_TYPE_TRANSLATE;
+ if (bp->attr.bp_type & HW_BREAKPOINT_R)
+ info->type |= HW_BRK_TYPE_READ;
+ if (bp->attr.bp_type & HW_BREAKPOINT_W)
+ info->type |= HW_BRK_TYPE_WRITE;
+ if (info->type == HW_BRK_TYPE_TRANSLATE)
+		/* must set at least read or write */
return ret;
- }
-
+ if (!(bp->attr.exclude_user))
+ info->type |= HW_BRK_TYPE_USER;
+ if (!(bp->attr.exclude_kernel))
+ info->type |= HW_BRK_TYPE_KERNEL;
+ if (!(bp->attr.exclude_hv))
+ info->type |= HW_BRK_TYPE_HYP;
info->address = bp->attr.bp_addr;
info->len = bp->attr.bp_len;
- info->dabrx = DABRX_ALL;
- if (bp->attr.exclude_user)
- info->dabrx &= ~DABRX_USER;
- if (bp->attr.exclude_kernel)
- info->dabrx &= ~DABRX_KERNEL;
- if (bp->attr.exclude_hv)
- info->dabrx &= ~DABRX_HYP;
/*
* Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -184,8 +171,16 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
* 'symbolsize' should satisfy the check below.
*/
+ length_max = 8; /* DABR */
+ if (cpu_has_feature(CPU_FTR_DAWR)) {
+		length_max = 512; /* 64 doublewords */
+		/* DAWR region can't cross a 512-byte boundary */
+ if ((bp->attr.bp_addr >> 10) !=
+ ((bp->attr.bp_addr + bp->attr.bp_len) >> 10))
+ return -EINVAL;
+ }
if (info->len >
- (HW_BREAKPOINT_LEN - (info->address & HW_BREAKPOINT_ALIGN)))
+ (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
return -EINVAL;
return 0;
}
@@ -204,7 +199,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
info = counter_arch_bp(tsk->thread.last_hit_ubp);
regs->msr &= ~MSR_SE;
- set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
+ set_breakpoint(info);
tsk->thread.last_hit_ubp = NULL;
}
@@ -222,7 +217,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
unsigned long dar = regs->dar;
/* Disable breakpoints during exception handling */
- set_dabr(0, 0);
+ hw_breakpoint_disable();
/*
* The counter may be concurrently released but that can only
@@ -255,8 +250,9 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
* we still need to single-step the instruction, but we don't
* generate an event.
*/
- info->extraneous_interrupt = !((bp->attr.bp_addr <= dar) &&
- (dar - bp->attr.bp_addr < bp->attr.bp_len));
+ if (!((bp->attr.bp_addr <= dar) &&
+ (dar - bp->attr.bp_addr < bp->attr.bp_len)))
+ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
/* Do not emulate user-space instructions, instead single-step them */
if (user_mode(regs)) {
@@ -285,10 +281,10 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
* As a policy, the callback is invoked in a 'trigger-after-execute'
* fashion
*/
- if (!info->extraneous_interrupt)
+ if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
perf_bp_event(bp, regs);
- set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
+ set_breakpoint(info);
out:
rcu_read_unlock();
return rc;
@@ -317,10 +313,10 @@ int __kprobes single_step_dabr_instruction(struct die_args *args)
* We shall invoke the user-defined callback function in the single
* stepping handler to confirm to 'trigger-after-execute' semantics
*/
- if (!info->extraneous_interrupt)
+ if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
perf_bp_event(bp, regs);
- set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
+ set_breakpoint(info);
current->thread.last_hit_ubp = NULL;
/*
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 3656e88..de91cf1 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -33,11 +33,6 @@
#include <asm/runlatch.h>
#include <asm/smp.h>
-#ifdef CONFIG_HOTPLUG_CPU
-#define cpu_should_die() cpu_is_offline(smp_processor_id())
-#else
-#define cpu_should_die() 0
-#endif
unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(cpuidle_disable);
@@ -50,70 +45,40 @@ static int __init powersave_off(char *arg)
}
__setup("powersave=off", powersave_off);
-/*
- * The body of the idle task.
- */
-void cpu_idle(void)
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
-
- while (!need_resched() && !cpu_should_die()) {
- ppc64_runlatch_off();
-
- if (ppc_md.power_save) {
- clear_thread_flag(TIF_POLLING_NRFLAG);
- /*
- * smp_mb is so clearing of TIF_POLLING_NRFLAG
- * is ordered w.r.t. need_resched() test.
- */
- smp_mb();
- local_irq_disable();
-
- /* Don't trace irqs off for idle */
- stop_critical_timings();
-
- /* check again after disabling irqs */
- if (!need_resched() && !cpu_should_die()) {
-#if !defined(CONFIG_DEBUG_CW)
- ppc_md.power_save();
+ sched_preempt_enable_no_resched();
+ cpu_die();
+}
#endif
- }
-
- start_critical_timings();
- /* Some power_save functions return with
- * interrupts enabled, some don't.
- */
- if (irqs_disabled())
- local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- } else {
- /*
- * Go into low thread priority and possibly
- * low power mode.
- */
- HMT_low();
- HMT_very_low();
- }
- }
+void arch_cpu_idle(void)
+{
+ ppc64_runlatch_off();
- HMT_medium();
- ppc64_runlatch_on();
- rcu_idle_exit();
- tick_nohz_idle_exit();
-#ifdef CONFIG_FSL_ERRATUM_A_006184
- mtspr(SPRN_TSR, TSR_ENW);
+ if (ppc_md.power_save) {
+#if !defined(CONFIG_DEBUG_CW)
+ ppc_md.power_save();
#endif
- if (cpu_should_die()) {
- sched_preempt_enable_no_resched();
- cpu_die();
- }
- schedule_preempt_disabled();
+ /*
+ * Some power_save functions return with
+ * interrupts enabled, some don't.
+ */
+ if (irqs_disabled())
+ local_irq_enable();
+ } else {
+ local_irq_enable();
+ /*
+ * Go into low thread priority and possibly
+ * low power mode.
+ */
+ HMT_low();
+ HMT_very_low();
}
+
+ HMT_medium();
+ ppc64_runlatch_on();
}
int powersave_nap;
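For context, the idle.c rework above drops the arch-private cpu_idle() loop and instead supplies arch_cpu_idle() / arch_cpu_idle_dead() hooks to the generic idle loop. A much-simplified sketch of the loop those hooks plug into (hypothetical, not the actual core code, details elided):

	/* Hypothetical, much-simplified shape of the generic idle loop. */
	static void generic_idle_loop_sketch(void)
	{
		while (1) {
			while (!need_resched()) {
				local_irq_disable();
				if (cpu_is_offline(smp_processor_id()))
					arch_cpu_idle_dead();	/* does not return */
				arch_cpu_idle();	/* expected to return with IRQs enabled */
			}
			schedule_preempt_disabled();
		}
	}

This contract is why the new arch_cpu_idle() re-enables interrupts itself on the non-power_save path, and checks irqs_disabled() after ppc_md.power_save(), since some power_save implementations return with interrupts still off.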
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c862fd7..c0d0dbd 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -102,7 +102,7 @@ static int __init fail_iommu_debugfs(void)
struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
NULL, &fail_iommu);
- return IS_ERR(dir) ? PTR_ERR(dir) : 0;
+ return PTR_RET(dir);
}
late_initcall(fail_iommu_debugfs);
@@ -717,6 +717,13 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
return;
}
+ /*
+ * In case we have reserved the first bit, we should not emit
+ * the warning below.
+ */
+ if (tbl->it_offset == 0)
+ clear_bit(0, tbl->it_map);
+
/* verify that table contains no entries */
if (!bitmap_empty(tbl->it_map, tbl->it_size))
pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 71413f4..ea185e0 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -122,8 +122,8 @@ static inline notrace int decrementer_check_overflow(void)
}
/* This is called whenever we are re-enabling interrupts
- * and returns either 0 (nothing to do) or 500/900 if there's
- * either an EE or a DEC to generate.
+ * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
+ * there's an EE, DEC or DBELL to generate.
*
* This is called in two contexts: From arch_local_irq_restore()
* before soft-enabling interrupts, and from the exception exit
@@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void)
* in case we also had a rollover while hard disabled
*/
local_paca->irq_happened &= ~PACA_IRQ_DEC;
- if (decrementer_check_overflow())
+ if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
return 0x900;
/* Finally check if an external interrupt happened */
@@ -182,6 +182,13 @@ notrace unsigned int __check_irq_replay(void)
local_paca->irq_happened &= ~PACA_IRQ_DBELL;
if (happened & PACA_IRQ_DBELL)
return 0x280;
+#else
+ local_paca->irq_happened &= ~PACA_IRQ_DBELL;
+ if (happened & PACA_IRQ_DBELL) {
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ return 0xe80;
+ return 0xa00;
+ }
#endif /* CONFIG_PPC_BOOK3E */
/* There should be nothing left ! */
@@ -367,6 +374,15 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
seq_printf(p, " Machine check exceptions\n");
+#ifdef CONFIG_PPC_DOORBELL
+ if (cpu_has_feature(CPU_FTR_DBELL)) {
+ seq_printf(p, "%*s: ", prec, "DBL");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
+ seq_printf(p, " Doorbell interrupts\n");
+ }
+#endif
+
return 0;
}
@@ -380,6 +396,9 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
sum += per_cpu(irq_stat, cpu).pmu_irqs;
sum += per_cpu(irq_stat, cpu).mce_exceptions;
sum += per_cpu(irq_stat, cpu).spurious_irqs;
+#ifdef CONFIG_PPC_DOORBELL
+ sum += per_cpu(irq_stat, cpu).doorbell_irqs;
+#endif
return sum;
}
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index a7bc752..c1eef24 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -159,7 +159,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
if (user_mode(regs))
return 0;
- backup_current_thread_info = (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
+ backup_current_thread_info = kmalloc(sizeof(struct thread_info), GFP_KERNEL);
/*
* On Book E and perhaps other processors, singlestep is handled on
* the critical exception stack. This causes current_thread_info()
@@ -199,7 +199,7 @@ static int kgdb_iabr_match(struct pt_regs *regs)
return 1;
}
-static int kgdb_dabr_match(struct pt_regs *regs)
+static int kgdb_break_match(struct pt_regs *regs)
{
if (user_mode(regs))
return 0;
@@ -459,7 +459,7 @@ static void *old__debugger;
static void *old__debugger_bpt;
static void *old__debugger_sstep;
static void *old__debugger_iabr_match;
-static void *old__debugger_dabr_match;
+static void *old__debugger_break_match;
static void *old__debugger_fault_handler;
int kgdb_arch_init(void)
@@ -469,7 +469,7 @@ int kgdb_arch_init(void)
old__debugger_bpt = __debugger_bpt;
old__debugger_sstep = __debugger_sstep;
old__debugger_iabr_match = __debugger_iabr_match;
- old__debugger_dabr_match = __debugger_dabr_match;
+ old__debugger_break_match = __debugger_break_match;
old__debugger_fault_handler = __debugger_fault_handler;
__debugger_ipi = kgdb_call_nmi_hook;
@@ -477,7 +477,7 @@ int kgdb_arch_init(void)
__debugger_bpt = kgdb_handle_breakpoint;
__debugger_sstep = kgdb_singlestep;
__debugger_iabr_match = kgdb_iabr_match;
- __debugger_dabr_match = kgdb_dabr_match;
+ __debugger_break_match = kgdb_break_match;
__debugger_fault_handler = kgdb_not_implemented;
return 0;
@@ -490,6 +490,6 @@ void kgdb_arch_exit(void)
__debugger_bpt = old__debugger_bpt;
__debugger_sstep = old__debugger_sstep;
__debugger_iabr_match = old__debugger_iabr_match;
- __debugger_dabr_match = old__debugger_dabr_match;
+ __debugger_break_match = old__debugger_break_match;
__debugger_fault_handler = old__debugger_fault_handler;
}
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e88c643..11f5b03 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -310,7 +310,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
@@ -330,7 +330,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -357,7 +357,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
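The kprobes change reflects the hlist iterator API update in this kernel generation: hlist_for_each_entry_safe() and friends no longer take a separate struct hlist_node cursor for the current element, only the entry pointer itself plus a temporary for the next node. A minimal sketch of the new shape (the item/drain names are made up for illustration):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {
		int val;
		struct hlist_node hlist;
	};

	/* Free every item on the list; 'tmp' keeps iteration safe across hlist_del(). */
	static void drain(struct hlist_head *head)
	{
		struct item *pos;
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(pos, tmp, head, hlist) {
			hlist_del(&pos->hlist);
			kfree(pos);
		}
	}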
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index a61b133..6782221 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -756,12 +756,7 @@ static __init void kvm_free_tmp(void)
end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;
/* Free the tmp space we don't need */
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ free_reserved_area(start, end, 0, NULL);
}
static int __init kvm_guest_init(void)
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index f5725bc..d92f387 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -41,8 +41,6 @@
/* #define LPARCFG_DEBUG */
-static struct proc_dir_entry *proc_ppc64_lparcfg;
-
/*
* Track sum of all purrs across all processors. This is used to further
* calculate usage values by different applications
@@ -301,6 +299,7 @@ static void parse_system_parameter_string(struct seq_file *m)
__pa(rtas_data_buf),
RTAS_DATA_BUF_SIZE);
memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
+ local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
spin_unlock(&rtas_data_buf_lock);
if (call_status != 0) {
@@ -688,27 +687,22 @@ static const struct file_operations lparcfg_fops = {
static int __init lparcfg_init(void)
{
- struct proc_dir_entry *ent;
umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
/* Allow writing if we have FW_FEATURE_SPLPAR */
if (firmware_has_feature(FW_FEATURE_SPLPAR))
mode |= S_IWUSR;
- ent = proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops);
- if (!ent) {
+ if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops)) {
printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
return -EIO;
}
-
- proc_ppc64_lparcfg = ent;
return 0;
}
static void __exit lparcfg_cleanup(void)
{
- if (proc_ppc64_lparcfg)
- remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
+ remove_proc_subtree("powerpc/lparcfg", NULL);
}
module_init(lparcfg_init);
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 7206701..611acdf 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
+#include <linux/hardirq.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -162,6 +163,8 @@ static int kexec_all_irq_disabled = 0;
static void kexec_smp_down(void *arg)
{
local_irq_disable();
+ hard_irq_disable();
+
mb(); /* make sure our irqs are disabled before we say they are */
get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
while(kexec_all_irq_disabled == 0)
@@ -244,6 +247,8 @@ static void kexec_prepare_cpus(void)
wake_offline_cpus();
smp_call_function(kexec_smp_down, NULL, /* wait */0);
local_irq_disable();
+ hard_irq_disable();
+
mb(); /* make sure IRQs are disabled before we say they are */
get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
@@ -281,6 +286,7 @@ static void kexec_prepare_cpus(void)
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 0);
local_irq_disable();
+ hard_irq_disable();
}
#endif /* SMP */
@@ -330,10 +336,13 @@ void default_machine_kexec(struct kimage *image)
pr_debug("kexec: Starting switchover sequence.\n");
 	/* switch to a statically allocated stack. Based on irq stack code.
+	 * We set up preempt_count to avoid using VMX in memcpy.
* XXX: the task struct will likely be invalid once we do the copy!
*/
kexec_stack.thread_info.task = current_thread_info()->task;
kexec_stack.thread_info.flags = 0;
+ kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
+ kexec_stack.thread_info.cpu = current_thread_info()->cpu;
/* We need a static PACA, too; copy this CPU's PACA over and switch to
* it. Also poison per_cpu_offset to catch anyone using non-static
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 19e096b..e469f30 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -657,6 +657,17 @@ _GLOBAL(__ucmpdi2)
li r3,2
blr
+_GLOBAL(__bswapdi2)
+ rotlwi r9,r4,8
+ rotlwi r10,r3,8
+ rlwimi r9,r4,24,0,7
+ rlwimi r10,r3,24,0,7
+ rlwimi r9,r4,24,16,23
+ rlwimi r10,r3,24,16,23
+ mr r3,r9
+ mr r4,r10
+ blr
+
_GLOBAL(abs)
srawi r4,r3,31
xor r3,r3,r4
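Both misc_32.S (above) and misc_64.S (below) gain a hand-written __bswapdi2, the libgcc-style helper the compiler may emit calls to for 64-bit byte swaps; it is exported from ppc_ksyms.c in a later hunk. As a reference for what the assembly computes, a plain C equivalent would look like the sketch below (the _ref suffix only marks it as illustrative, not part of the patch):

	/* Byte-reverse a 64-bit value: bswapdi semantics. */
	unsigned long long __bswapdi2_ref(unsigned long long x)
	{
		return ((x & 0x00000000000000ffULL) << 56) |
		       ((x & 0x000000000000ff00ULL) << 40) |
		       ((x & 0x0000000000ff0000ULL) << 24) |
		       ((x & 0x00000000ff000000ULL) <<  8) |
		       ((x & 0x000000ff00000000ULL) >>  8) |
		       ((x & 0x0000ff0000000000ULL) >> 24) |
		       ((x & 0x00ff000000000000ULL) >> 40) |
		       ((x & 0xff00000000000000ULL) >> 56);
	}

The 32-bit version expresses the same operation with rlwimi: each 32-bit half is byte-reversed and the two halves (r3/r4) are exchanged.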
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 6371ea9..27c3403 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -234,6 +234,17 @@ _GLOBAL(__flush_dcache_icache)
isync
blr
+_GLOBAL(__bswapdi2)
+ srdi r8,r3,32
+ rlwinm r7,r3,8,0xffffffff
+ rlwimi r7,r3,24,0,7
+ rlwinm r9,r8,8,0xffffffff
+ rlwimi r7,r3,24,16,23
+ rlwimi r9,r8,24,0,7
+ rlwimi r9,r8,24,16,23
+ sldi r7,r7,32
+ or r3,r7,r9
+ blr
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index bec1e93..48fbc2b 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -511,8 +511,7 @@ int __init nvram_scan_partitions(void)
"detected: 0-length partition\n");
goto out;
}
- tmp_part = (struct nvram_partition *)
- kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
+ tmp_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
err = -ENOMEM;
if (!tmp_part) {
printk(KERN_ERR "nvram_scan_partitions: kmalloc failed\n");
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 07c1269..a7b7430 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -71,10 +71,8 @@ static int of_pci_phb_probe(struct platform_device *dev)
eeh_dev_phb_init_dynamic(phb);
/* Register devices with EEH */
-#ifdef CONFIG_EEH
if (dev->dev.of_node->child)
eeh_add_device_tree_early(dev->dev.of_node);
-#endif /* CONFIG_EEH */
/* Scan the bus */
pcibios_scan_phb(phb);
@@ -88,13 +86,14 @@ static int of_pci_phb_probe(struct platform_device *dev)
pcibios_claim_one_bus(phb->bus);
/* Finish EEH setup */
-#ifdef CONFIG_EEH
eeh_add_device_tree_late(phb->bus);
-#endif
/* Add probed PCI devices to the device model */
pci_bus_add_devices(phb->bus);
+ /* sysfs files should only be added after devices are added */
+ eeh_add_sysfs_files(phb->bus);
+
return 0;
}
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 08ee413..f9dd9be 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -120,8 +120,6 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = {
struct paca_struct *paca;
EXPORT_SYMBOL(paca);
-struct paca_struct boot_paca;
-
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
/* The TOC register (GPR2) points 32kB into the TOC, so that 64kB
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 8384282..4ebb1f2 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/vgaarb.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -358,7 +359,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
enum pci_mmap_state mmap_state,
int write_combine)
{
- unsigned long prot = pgprot_val(protection);
/* Write combine is always 0 on non-memory space mappings. On
* memory space, if the user didn't pass 1, we check for a
@@ -375,9 +375,9 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
/* XXX would be nice to have a way to ask for write-through */
if (write_combine)
- return pgprot_noncached_wc(prot);
+ return pgprot_noncached_wc(protection);
else
- return pgprot_noncached(prot);
+ return pgprot_noncached(protection);
}
/*
@@ -657,15 +657,6 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
* ranges. However, some machines (thanks Apple !) tend to split their
* space into lots of small contiguous ranges. So we have to coalesce.
*
- * - We can only cope with all memory ranges having the same offset
- * between CPU addresses and PCI addresses. Unfortunately, some bridges
- * are setup for a large 1:1 mapping along with a small "window" which
- * maps PCI address 0 to some arbitrary high address of the CPU space in
- * order to give access to the ISA memory hole.
- * The way out of here that I've chosen for now is to always set the
- * offset based on the first resource found, then override it if we
- * have a different offset and the previous was set by an ISA hole.
- *
* - Some busses have IO space not starting at 0, which causes trouble with
* the way we do our IO resource renumbering. The code somewhat deals with
* it for 64 bits but I would expect problems on 32 bits.
@@ -680,10 +671,9 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
int rlen;
int pna = of_n_addr_cells(dev);
int np = pna + 5;
- int memno = 0, isa_hole = -1;
+ int memno = 0;
u32 pci_space;
unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
- unsigned long long isa_mb = 0;
struct resource *res;
printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
@@ -777,30 +767,14 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
}
/* Handles ISA memory hole space here */
if (pci_addr == 0) {
- isa_mb = cpu_addr;
- isa_hole = memno;
if (primary || isa_mem_base == 0)
isa_mem_base = cpu_addr;
hose->isa_mem_phys = cpu_addr;
hose->isa_mem_size = size;
}
- /* We get the PCI/Mem offset from the first range or
- * the, current one if the offset came from an ISA
- * hole. If they don't match, bugger.
- */
- if (memno == 0 ||
- (isa_hole >= 0 && pci_addr != 0 &&
- hose->pci_mem_offset == isa_mb))
- hose->pci_mem_offset = cpu_addr - pci_addr;
- else if (pci_addr != 0 &&
- hose->pci_mem_offset != cpu_addr - pci_addr) {
- printk(KERN_INFO
- " \\--> Skipped (offset mismatch) !\n");
- continue;
- }
-
/* Build resource */
+ hose->mem_offset[memno] = cpu_addr - pci_addr;
res = &hose->mem_resources[memno++];
res->flags = IORESOURCE_MEM;
if (pci_space & 0x40000000)
@@ -816,20 +790,6 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
res->child = NULL;
}
}
-
- /* If there's an ISA hole and the pci_mem_offset is -not- matching
- * the ISA hole offset, then we need to remove the ISA hole from
- * the resource list for that brige
- */
- if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
- unsigned int next = isa_hole + 1;
- printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
- if (next < memno)
- memmove(&hose->mem_resources[isa_hole],
- &hose->mem_resources[next],
- sizeof(struct resource) * (memno - next));
- hose->mem_resources[--memno].flags = 0;
- }
}
/* Decide whether to display the domain number in /proc */
@@ -844,6 +804,14 @@ int pci_proc_domain(struct pci_bus *bus)
return 1;
}
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ if (ppc_md.pcibios_root_bridge_prepare)
+ return ppc_md.pcibios_root_bridge_prepare(bridge);
+
+ return 0;
+}
+
/* This header fixup will do the resource fixup for all devices as they are
* probed, but not for bridge ranges
*/
@@ -859,6 +827,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
}
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
struct resource *res = dev->resource + i;
+ struct pci_bus_region reg;
if (!res->flags)
continue;
@@ -867,8 +836,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
* at 0 as unset as well, except if PCI_PROBE_ONLY is also set
* since in that case, we don't want to re-assign anything
*/
+ pcibios_resource_to_bus(dev, &reg, res);
if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
- (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
+ (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
/* Only print message if not re-assigning */
if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
@@ -907,6 +877,7 @@ static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
struct pci_controller *hose = pci_bus_to_host(bus);
struct pci_dev *dev = bus->self;
resource_size_t offset;
+ struct pci_bus_region region;
u16 command;
int i;
@@ -916,10 +887,10 @@ static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
/* Job is a bit different between memory and IO */
if (res->flags & IORESOURCE_MEM) {
- /* If the BAR is non-0 (res != pci_mem_offset) then it's probably been
- * initialized by somebody
- */
- if (res->start != hose->pci_mem_offset)
+ pcibios_resource_to_bus(dev, &region, res);
+
+ /* If the BAR is non-0 then it's probably been initialized */
+ if (region.start != 0)
return 0;
/* The BAR is 0, let's check if memory decoding is enabled on
@@ -931,11 +902,11 @@ static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
/* Memory decoding is enabled and the BAR is 0. If any of the bridge
* resources covers that starting address (0 then it's good enough for
- * us for memory
+ * us for memory space)
*/
for (i = 0; i < 3; i++) {
if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
- hose->mem_resources[i].start == hose->pci_mem_offset)
+ hose->mem_resources[i].start == hose->mem_offset[i])
return 0;
}
@@ -1023,7 +994,7 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
ppc_md.pci_dma_bus_setup(bus);
}
-void pcibios_setup_device(struct pci_dev *dev)
+static void pcibios_setup_device(struct pci_dev *dev)
{
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
@@ -1044,6 +1015,17 @@ void pcibios_setup_device(struct pci_dev *dev)
ppc_md.pci_irq_fixup(dev);
}
+int pcibios_add_device(struct pci_dev *dev)
+{
+ /*
+ * We can only call pcibios_setup_device() after bus setup is complete,
+ * since some of the platform specific DMA setup code depends on it.
+ */
+ if (dev->bus->is_added)
+ pcibios_setup_device(dev);
+ return 0;
+}
+
void pcibios_setup_bus_devices(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1372,10 +1354,9 @@ static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
no_io:
/* Check for memory */
- offset = hose->pci_mem_offset;
- pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
for (i = 0; i < 3; i++) {
pres = &hose->mem_resources[i];
+ offset = hose->mem_offset[i];
if (!(pres->flags & IORESOURCE_MEM))
continue;
pr_debug("hose mem res: %pR\n", pres);
@@ -1482,11 +1463,14 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
pcibios_allocate_bus_resources(bus);
pcibios_claim_one_bus(bus);
+ /* Fixup EEH */
+ eeh_add_device_tree_late(bus);
+
/* Add new devices to global lists. Register in proc, sysfs. */
pci_bus_add_devices(bus);
- /* Fixup EEH */
- eeh_add_device_tree_late(bus);
+ /* sysfs files should only be added after devices are added */
+ eeh_add_sysfs_files(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
@@ -1512,6 +1496,7 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
struct list_head *resources)
{
struct resource *res;
+ resource_size_t offset;
int i;
/* Hookup PHB IO resource */
@@ -1521,49 +1506,38 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
printk(KERN_WARNING "PCI: I/O resource not set for host"
" bridge %s (domain %d)\n",
hose->dn->full_name, hose->global_number);
-#ifdef CONFIG_PPC32
- /* Workaround for lack of IO resource only on 32-bit */
- res->start = (unsigned long)hose->io_base_virt - isa_io_base;
- res->end = res->start + IO_SPACE_LIMIT;
- res->flags = IORESOURCE_IO;
-#endif /* CONFIG_PPC32 */
- }
+ } else {
+ offset = pcibios_io_space_offset(hose);
- pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned long)res->flags);
- pci_add_resource_offset(resources, res, pcibios_io_space_offset(hose));
+ pr_debug("PCI: PHB IO resource = %08llx-%08llx [%lx] off 0x%08llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned long)res->flags,
+ (unsigned long long)offset);
+ pci_add_resource_offset(resources, res, offset);
+ }
/* Hookup PHB Memory resources */
for (i = 0; i < 3; ++i) {
res = &hose->mem_resources[i];
if (!res->flags) {
- if (i > 0)
- continue;
- printk(KERN_ERR "PCI: Memory resource 0 not set for "
- "host bridge %s (domain %d)\n",
- hose->dn->full_name, hose->global_number);
-#ifdef CONFIG_PPC32
- /* Workaround for lack of MEM resource only on 32-bit */
- res->start = hose->pci_mem_offset;
- res->end = (resource_size_t)-1LL;
- res->flags = IORESOURCE_MEM;
-#endif /* CONFIG_PPC32 */
+ if (i == 0)
+ printk(KERN_ERR "PCI: Memory resource 0 not set for "
+ "host bridge %s (domain %d)\n",
+ hose->dn->full_name, hose->global_number);
+ continue;
}
+ offset = hose->mem_offset[i];
- pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i,
+
+ pr_debug("PCI: PHB MEM resource %d = %08llx-%08llx [%lx] off 0x%08llx\n", i,
(unsigned long long)res->start,
(unsigned long long)res->end,
- (unsigned long)res->flags);
- pci_add_resource_offset(resources, res, hose->pci_mem_offset);
- }
-
- pr_debug("PCI: PHB MEM offset = %016llx\n",
- (unsigned long long)hose->pci_mem_offset);
- pr_debug("PCI: PHB IO offset = %08lx\n",
- (unsigned long)hose->io_base_virt - _IO_BASE);
+ (unsigned long)res->flags,
+ (unsigned long long)offset);
+ pci_add_resource_offset(resources, res, offset);
+ }
}
/*
@@ -1731,3 +1705,15 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+
+static void fixup_vga(struct pci_dev *pdev)
+{
+ u16 cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
+ vga_set_default_device(pdev);
+
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index e37c215..432459c 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -295,7 +295,7 @@ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
case IOBASE_BRIDGE_NUMBER:
return (long)hose->first_busno;
case IOBASE_MEMORY:
- return (long)hose->pci_mem_offset;
+ return (long)hose->mem_offset[0];
case IOBASE_IO:
return (long)hose->io_base_phys;
case IOBASE_ISA_IO:
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 51a133a..2e86296 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -246,7 +246,7 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
case IOBASE_BRIDGE_NUMBER:
return (long)hose->first_busno;
case IOBASE_MEMORY:
- return (long)hose->pci_mem_offset;
+ return (long)hose->mem_offset[0];
case IOBASE_IO:
return (long)hose->io_base_phys;
case IOBASE_ISA_IO:
@@ -266,3 +266,13 @@ int pcibus_to_node(struct pci_bus *bus)
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
+
+static void quirk_radeon_32bit_msi(struct pci_dev *dev)
+{
+ struct pci_dn *pdn = pci_get_pdn(dev);
+
+ if (pdn)
+ pdn->force_32bit_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index e7af165..df03844 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -32,6 +32,14 @@
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
+struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
+{
+ struct device_node *dn = pci_device_to_OF_node(pdev);
+ if (!dn)
+ return NULL;
+ return PCI_DN(dn);
+}
+
/*
* Traverse_func that inits the PCI fields of the device node.
* NOTE: this *must* be done before read/write config to the device.
diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/kernel/ppc32.h
index 02fb0ee..a27c914 100644
--- a/arch/powerpc/kernel/ppc32.h
+++ b/arch/powerpc/kernel/ppc32.h
@@ -16,30 +16,6 @@
/* These are here to support 32-bit syscalls on a 64-bit kernel. */
-#define __old_sigaction32 old_sigaction32
-
-struct __old_sigaction32 {
- compat_uptr_t sa_handler;
- compat_old_sigset_t sa_mask;
- unsigned int sa_flags;
- compat_uptr_t sa_restorer; /* not used by Linux/SPARC yet */
-};
-
-
-
-struct sigaction32 {
- compat_uptr_t sa_handler; /* Really a pointer, but need to deal with 32 bits */
- unsigned int sa_flags;
- compat_uptr_t sa_restorer; /* Another 32 bit pointer */
- compat_sigset_t sa_mask; /* A 32 bit mask */
-};
-
-typedef struct sigaltstack_32 {
- unsigned int ss_sp;
- int ss_flags;
- compat_size_t ss_size;
-} stack_32_t;
-
struct pt_regs32 {
unsigned int gpr[32];
unsigned int nip;
@@ -75,7 +51,7 @@ struct mcontext32 {
struct ucontext32 {
unsigned int uc_flags;
unsigned int uc_link;
- stack_32_t uc_stack;
+ compat_stack_t uc_stack;
int uc_pad[7];
compat_uptr_t uc_regs; /* points to uc_mcontext field */
compat_sigset_t uc_sigmask; /* mask last for extensibility */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index d45b14d..c7be028 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -143,7 +143,8 @@ EXPORT_SYMBOL(__lshrdi3);
int __ucmpdi2(unsigned long long, unsigned long long);
EXPORT_SYMBOL(__ucmpdi2);
#endif
-
+long long __bswapdi2(long long);
+EXPORT_SYMBOL(__bswapdi2);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c
index c8ae371..feb8580 100644
--- a/arch/powerpc/kernel/proc_powerpc.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -32,8 +32,6 @@
static loff_t page_map_seek( struct file *file, loff_t off, int whence)
{
loff_t new;
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
-
switch(whence) {
case 0:
new = off;
@@ -42,12 +40,12 @@ static loff_t page_map_seek( struct file *file, loff_t off, int whence)
new = file->f_pos + off;
break;
case 2:
- new = dp->size + off;
+ new = PAGE_SIZE + off;
break;
default:
return -EINVAL;
}
- if ( new < 0 || new > dp->size )
+ if ( new < 0 || new > PAGE_SIZE )
return -EINVAL;
return (file->f_pos = new);
}
@@ -55,19 +53,18 @@ static loff_t page_map_seek( struct file *file, loff_t off, int whence)
static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
- return simple_read_from_buffer(buf, nbytes, ppos, dp->data, dp->size);
+ return simple_read_from_buffer(buf, nbytes, ppos,
+ PDE_DATA(file_inode(file)), PAGE_SIZE);
}
static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
-
- if ((vma->vm_end - vma->vm_start) > dp->size)
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE)
return -EINVAL;
- remap_pfn_range(vma, vma->vm_start, __pa(dp->data) >> PAGE_SHIFT,
- dp->size, vma->vm_page_prot);
+ remap_pfn_range(vma, vma->vm_start,
+ __pa(PDE_DATA(file_inode(file))) >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot);
return 0;
}
@@ -86,7 +83,7 @@ static int __init proc_ppc64_init(void)
&page_map_fops, vdso_data);
if (!pde)
return 1;
- pde->size = PAGE_SIZE;
+ proc_set_size(pde, PAGE_SIZE);
return 0;
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8d9b73c..70eb4c2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -50,6 +50,7 @@
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
+#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
@@ -57,6 +58,13 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
+/* Transactional Memory debug */
+#ifdef TM_DEBUG_SW
+#define TM_DEBUG(x...) printk(KERN_INFO x)
+#else
+#define TM_DEBUG(x...) do { } while(0)
+#endif
+
extern unsigned long _get_SP(void);
#ifndef CONFIG_SMP
@@ -271,7 +279,7 @@ void do_send_trap(struct pt_regs *regs, unsigned long address,
force_sig_info(SIGTRAP, &info, current);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
-void do_dabr(struct pt_regs *regs, unsigned long address,
+void do_break (struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
siginfo_t info;
@@ -281,11 +289,11 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
11, SIGSEGV) == NOTIFY_STOP)
return;
- if (debugger_dabr_match(regs))
+ if (debugger_break_match(regs))
return;
- /* Clear the DABR */
- set_dabr(0, 0);
+ /* Clear the breakpoint */
+ hw_breakpoint_disable();
/* Deliver the signal to userspace */
info.si_signo = SIGTRAP;
@@ -296,7 +304,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-static DEFINE_PER_CPU(unsigned long, current_dabr);
+static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
@@ -372,39 +380,217 @@ EXPORT_SYMBOL(switch_booke_debug_regs);
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
- if (thread->dabr) {
- thread->dabr = 0;
- thread->dabrx = 0;
- set_dabr(0, 0);
- }
+ thread->hw_brk.address = 0;
+ thread->hw_brk.type = 0;
+ set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-int set_dabr(unsigned long dabr, unsigned long dabrx)
-{
- __get_cpu_var(current_dabr) = dabr;
-
- if (ppc_md.set_dabr)
- return ppc_md.set_dabr(dabr, dabrx);
-
- /* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
+{
mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
isync();
#endif
+ return 0;
+}
#elif defined(CONFIG_PPC_BOOK3S)
+static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
+{
mtspr(SPRN_DABR, dabr);
- mtspr(SPRN_DABRX, dabrx);
+ if (cpu_has_feature(CPU_FTR_DABRX))
+ mtspr(SPRN_DABRX, dabrx);
+ return 0;
+}
+#else
+static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
+{
+ return -EINVAL;
+}
#endif
+
+static inline int set_dabr(struct arch_hw_breakpoint *brk)
+{
+ unsigned long dabr, dabrx;
+
+ dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
+ dabrx = ((brk->type >> 3) & 0x7);
+
+ if (ppc_md.set_dabr)
+ return ppc_md.set_dabr(dabr, dabrx);
+
+ return __set_dabr(dabr, dabrx);
+}
+
+static inline int set_dawr(struct arch_hw_breakpoint *brk)
+{
+ unsigned long dawr, dawrx, mrd;
+
+ dawr = brk->address;
+
+	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
+			   << (63 - 58); /* read/write bits */
+	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
+			   << (63 - 59); /* translate */
+	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
+			   >> 3; /* privilege bits */
+	/* dawr length is stored in field MDR bits 48:53.  Matches range in
+	   doublewords (64 bits) biased by -1, e.g. 0b000000=1DW and
+	   0b111111=64DW.
+	   brk->len is in bytes.
+	   This aligns up to double word size, shifts and does the bias.
+	*/
+ mrd = ((brk->len + 7) >> 3) - 1;
+ dawrx |= (mrd & 0x3f) << (63 - 53);
+
+ if (ppc_md.set_dawr)
+ return ppc_md.set_dawr(dawr, dawrx);
+ mtspr(SPRN_DAWR, dawr);
+ mtspr(SPRN_DAWRX, dawrx);
return 0;
}
+int set_breakpoint(struct arch_hw_breakpoint *brk)
+{
+ __get_cpu_var(current_brk) = *brk;
+
+ if (cpu_has_feature(CPU_FTR_DAWR))
+ return set_dawr(brk);
+
+ return set_dabr(brk);
+}
+
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
+static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
+ struct arch_hw_breakpoint *b)
+{
+ if (a->address != b->address)
+ return false;
+ if (a->type != b->type)
+ return false;
+ if (a->len != b->len)
+ return false;
+ return true;
+}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline void tm_reclaim_task(struct task_struct *tsk)
+{
+ /* We have to work out if we're switching from/to a task that's in the
+ * middle of a transaction.
+ *
+ * In switching we need to maintain a 2nd register state as
+ * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
+ * checkpointed (tbegin) state in ckpt_regs and saves the transactional
+ * (current) FPRs into oldtask->thread.transact_fpr[].
+ *
+ * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
+ */
+ struct thread_struct *thr = &tsk->thread;
+
+ if (!thr->regs)
+ return;
+
+ if (!MSR_TM_ACTIVE(thr->regs->msr))
+ goto out_and_saveregs;
+
+ /* Stash the original thread MSR, as giveup_fpu et al will
+ * modify it. We hold onto it to see whether the task used
+ * FP & vector regs.
+ */
+ thr->tm_orig_msr = thr->regs->msr;
+
+ TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
+ "ccr=%lx, msr=%lx, trap=%lx)\n",
+ tsk->pid, thr->regs->nip,
+ thr->regs->ccr, thr->regs->msr,
+ thr->regs->trap);
+
+ tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);
+
+ TM_DEBUG("--- tm_reclaim on pid %d complete\n",
+ tsk->pid);
+
+out_and_saveregs:
+ /* Always save the regs here, even if a transaction's not active.
+ * This context-switches a thread's TM info SPRs. We do it here to
+ * be consistent with the restore path (in recheckpoint) which
+ * cannot happen later in _switch().
+ */
+ tm_save_sprs(thr);
+}
+
+static inline void tm_recheckpoint_new_task(struct task_struct *new)
+{
+ unsigned long msr;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return;
+
+ /* Recheckpoint the registers of the thread we're about to switch to.
+ *
+ * If the task was using FP, we non-lazily reload both the original and
+ * the speculative FP register states. This is because the kernel
+ * doesn't see if/when a TM rollback occurs, so if we take an FP
+ * unavailable exception later, we are unable to determine which set of
+ * FP regs needs to be restored.
+ */
+ if (!new->thread.regs)
+ return;
+
+ /* The TM SPRs are restored here, so that TEXASR.FS can be set
+ * before the trecheckpoint and no explosion occurs.
+ */
+ tm_restore_sprs(&new->thread);
+
+ if (!MSR_TM_ACTIVE(new->thread.regs->msr))
+ return;
+ msr = new->thread.tm_orig_msr;
+ /* Recheckpoint to restore original checkpointed register state. */
+ TM_DEBUG("*** tm_recheckpoint of pid %d "
+ "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
+ new->pid, new->thread.regs->msr, msr);
+
+ /* This loads the checkpointed FP/VEC state, if used */
+ tm_recheckpoint(&new->thread, msr);
+
+ /* This loads the speculative FP/VEC state, if used */
+ if (msr & MSR_FP) {
+ do_load_up_transact_fpu(&new->thread);
+ new->thread.regs->msr |=
+ (MSR_FP | new->thread.fpexc_mode);
+ }
+#ifdef CONFIG_ALTIVEC
+ if (msr & MSR_VEC) {
+ do_load_up_transact_altivec(&new->thread);
+ new->thread.regs->msr |= MSR_VEC;
+ }
+#endif
+ /* We may as well turn on VSX too since all the state is restored now */
+ if (msr & MSR_VSX)
+ new->thread.regs->msr |= MSR_VSX;
+
+ TM_DEBUG("*** tm_recheckpoint of pid %d complete "
+ "(kernel msr 0x%lx)\n",
+ new->pid, mfmsr());
+}
+
+static inline void __switch_to_tm(struct task_struct *prev)
+{
+ if (cpu_has_feature(CPU_FTR_TM)) {
+ tm_enable();
+ tm_reclaim_task(prev);
+ }
+}
+#else
+#define tm_recheckpoint_new_task(new)
+#define __switch_to_tm(prev)
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{
@@ -415,6 +601,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct ppc64_tlb_batch *batch;
#endif
+ __switch_to_tm(prev);
+
#ifdef CONFIG_SMP
/* avoid complexity of lazy save/restore of fpu
* by just saving it every time we switch out if
@@ -489,8 +677,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
* schedule DABR
*/
#ifndef CONFIG_HAVE_HW_BREAKPOINT
- if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
- set_dabr(new->thread.dabr, new->thread.dabrx);
+ if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
+ set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
@@ -530,6 +718,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
* of sync. Hard disable here.
*/
hard_irq_disable();
+
+ tm_recheckpoint_new_task(new);
+
last = _switch(old_thread, new_thread);
#ifdef CONFIG_PPC_BOOK3S_64
@@ -649,6 +840,8 @@ void show_regs(struct pt_regs * regs)
{
int i, trap;
+ show_regs_print_info(KERN_DEFAULT);
+
printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
regs->nip, regs->link, regs->ctr);
printk("REGS: %p TRAP: %04lx %s (%s)\n",
@@ -668,12 +861,6 @@ void show_regs(struct pt_regs * regs)
#else
printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
- printk("TASK = %p[%d] '%s' THREAD: %p",
- current, task_pid_nr(current), current->comm, task_thread_info(current));
-
-#ifdef CONFIG_SMP
- printk(" CPU: %d", raw_smp_processor_id());
-#endif /* CONFIG_SMP */
for (i = 0; i < 32; i++) {
if ((i % REGS_PER_LINE) == 0)
@@ -691,6 +878,9 @@ void show_regs(struct pt_regs * regs)
printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
+#endif
show_stack(current, (unsigned long *) regs->gpr[1]);
if (!user_mode(regs))
show_instructions(regs);
@@ -727,10 +917,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
flush_altivec_to_thread(src);
flush_vsx_to_thread(src);
flush_spe_to_thread(src);
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- flush_ptrace_hw_breakpoint(src);
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-
*dst = *src;
return 0;
}
@@ -794,6 +980,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
* do some house keeping and then return from the fork or clone
* system call, using the stack frame created above.
*/
+ ((unsigned long *)sp)[0] = 0;
sp -= sizeof(struct pt_regs);
kregs = (struct pt_regs *) sp;
sp -= STACK_FRAME_OVERHEAD;
@@ -801,6 +988,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
_ALIGN_UP(sizeof(struct thread_info), 16);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ p->thread.ptrace_bps[0] = NULL;
+#endif
+
#ifdef CONFIG_PPC_STD_MMU_64
if (mmu_has_feature(MMU_FTR_SLB)) {
unsigned long sp_vsid;
@@ -821,6 +1012,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.dscr_inherit = current->thread.dscr_inherit;
p->thread.dscr = current->thread.dscr;
}
+ if (cpu_has_feature(CPU_FTR_HAS_PPR))
+ p->thread.ppr = INIT_PPR;
#endif
/*
* The PPC64 ABI makes use of a TOC to contain function
@@ -900,7 +1093,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
regs->msr = MSR_USER32;
}
#endif
-
discard_lazy_cpu_state();
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
@@ -920,6 +1112,13 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.spefscr = 0;
current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM))
+ regs->msr |= MSR_TM;
+ current->thread.tm_tfhar = 0;
+ current->thread.tm_texasr = 0;
+ current->thread.tm_tfiar = 0;
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
@@ -1169,15 +1368,9 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
} while (count++ < kstack_depth_to_print);
}
-void dump_stack(void)
-{
- show_stack(current, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
-
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
-void __ppc64_runlatch_on(void)
+void notrace __ppc64_runlatch_on(void)
{
struct thread_info *ti = current_thread_info();
unsigned long ctrl;
@@ -1190,7 +1383,7 @@ void __ppc64_runlatch_on(void)
}
/* Called with hard IRQs off */
-void __ppc64_runlatch_off(void)
+void notrace __ppc64_runlatch_off(void)
{
struct thread_info *ti = current_thread_info();
unsigned long ctrl;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 779f340..5eccda9 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -66,8 +66,8 @@
* is running at whatever address it has been loaded at.
* On ppc32 we compile with -mrelocatable, which means that references
* to extern and static variables get relocated automatically.
- * On ppc64 we have to relocate the references explicitly with
- * RELOC. (Note that strings count as static variables.)
+ * ppc64 objects are always relocatable, we just need to relocate the
+ * TOC.
*
* Because OF may have mapped I/O devices into the area starting at
* KERNELBASE, particularly on CHRP machines, we can't safely call
@@ -79,13 +79,11 @@
* On ppc64, 64 bit values are truncated to 32 bits (and
* fortunately don't get interpreted as two arguments).
*/
+#define ADDR(x) (u32)(unsigned long)(x)
+
#ifdef CONFIG_PPC64
-#define RELOC(x) (*PTRRELOC(&(x)))
-#define ADDR(x) (u32) add_reloc_offset((unsigned long)(x))
#define OF_WORKAROUNDS 0
#else
-#define RELOC(x) (x)
-#define ADDR(x) (u32) (x)
#define OF_WORKAROUNDS of_workarounds
int of_workarounds;
#endif
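As an aside, a user-space sketch (illustrative only, not part of the patch) of what the simplified ADDR() now does with a 64-bit pointer; the u32 typedef is supplied here only so the snippet compiles on its own:

#include <stdio.h>

typedef unsigned int u32;

#define ADDR(x) (u32)(unsigned long)(x)

int main(void)
{
	static const char crlf[] = "\r\n";

	/* Only the low 32 bits of the address are handed to firmware. */
	printf("%p -> 0x%x\n", (const void *)crlf, ADDR(crlf));
	return 0;
}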
@@ -95,7 +93,7 @@ int of_workarounds;
#define PROM_BUG() do { \
prom_printf("kernel BUG at %s line 0x%x!\n", \
- RELOC(__FILE__), __LINE__); \
+ __FILE__, __LINE__); \
__asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
} while (0)
@@ -233,7 +231,7 @@ static int __init call_prom(const char *service, int nargs, int nret, ...)
for (i = 0; i < nret; i++)
args.args[nargs+i] = 0;
- if (enter_prom(&args, RELOC(prom_entry)) < 0)
+ if (enter_prom(&args, prom_entry) < 0)
return PROM_ERROR;
return (nret > 0) ? args.args[nargs] : 0;
@@ -258,7 +256,7 @@ static int __init call_prom_ret(const char *service, int nargs, int nret,
for (i = 0; i < nret; i++)
args.args[nargs+i] = 0;
- if (enter_prom(&args, RELOC(prom_entry)) < 0)
+ if (enter_prom(&args, prom_entry) < 0)
return PROM_ERROR;
if (rets != NULL)
@@ -272,20 +270,19 @@ static int __init call_prom_ret(const char *service, int nargs, int nret,
static void __init prom_print(const char *msg)
{
const char *p, *q;
- struct prom_t *_prom = &RELOC(prom);
- if (_prom->stdout == 0)
+ if (prom.stdout == 0)
return;
for (p = msg; *p != 0; p = q) {
for (q = p; *q != 0 && *q != '\n'; ++q)
;
if (q > p)
- call_prom("write", 3, 1, _prom->stdout, p, q - p);
+ call_prom("write", 3, 1, prom.stdout, p, q - p);
if (*q == 0)
break;
++q;
- call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
+ call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
}
}
@@ -294,7 +291,6 @@ static void __init prom_print_hex(unsigned long val)
{
int i, nibbles = sizeof(val)*2;
char buf[sizeof(val)*2+1];
- struct prom_t *_prom = &RELOC(prom);
for (i = nibbles-1; i >= 0; i--) {
buf[i] = (val & 0xf) + '0';
@@ -303,7 +299,7 @@ static void __init prom_print_hex(unsigned long val)
val >>= 4;
}
buf[nibbles] = '\0';
- call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
+ call_prom("write", 3, 1, prom.stdout, buf, nibbles);
}
/* max number of decimal digits in an unsigned long */
@@ -312,7 +308,6 @@ static void __init prom_print_dec(unsigned long val)
{
int i, size;
char buf[UL_DIGITS+1];
- struct prom_t *_prom = &RELOC(prom);
for (i = UL_DIGITS-1; i >= 0; i--) {
buf[i] = (val % 10) + '0';
@@ -322,7 +317,7 @@ static void __init prom_print_dec(unsigned long val)
}
/* shift stuff down */
size = UL_DIGITS - i;
- call_prom("write", 3, 1, _prom->stdout, buf+i, size);
+ call_prom("write", 3, 1, prom.stdout, buf+i, size);
}
static void __init prom_printf(const char *format, ...)
@@ -331,22 +326,18 @@ static void __init prom_printf(const char *format, ...)
va_list args;
unsigned long v;
long vs;
- struct prom_t *_prom = &RELOC(prom);
va_start(args, format);
-#ifdef CONFIG_PPC64
- format = PTRRELOC(format);
-#endif
for (p = format; *p != 0; p = q) {
for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
;
if (q > p)
- call_prom("write", 3, 1, _prom->stdout, p, q - p);
+ call_prom("write", 3, 1, prom.stdout, p, q - p);
if (*q == 0)
break;
if (*q == '\n') {
++q;
- call_prom("write", 3, 1, _prom->stdout,
+ call_prom("write", 3, 1, prom.stdout,
ADDR("\r\n"), 2);
continue;
}
@@ -368,7 +359,7 @@ static void __init prom_printf(const char *format, ...)
++q;
vs = va_arg(args, int);
if (vs < 0) {
- prom_print(RELOC("-"));
+ prom_print("-");
vs = -vs;
}
prom_print_dec(vs);
@@ -389,7 +380,7 @@ static void __init prom_printf(const char *format, ...)
++q;
vs = va_arg(args, long);
if (vs < 0) {
- prom_print(RELOC("-"));
+ prom_print("-");
vs = -vs;
}
prom_print_dec(vs);
@@ -403,7 +394,6 @@ static void __init prom_printf(const char *format, ...)
static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
unsigned long align)
{
- struct prom_t *_prom = &RELOC(prom);
if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
/*
@@ -414,21 +404,21 @@ static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
prom_arg_t result;
ret = call_prom_ret("call-method", 5, 2, &result,
- ADDR("claim"), _prom->memory,
+ ADDR("claim"), prom.memory,
align, size, virt);
if (ret != 0 || result == -1)
return -1;
ret = call_prom_ret("call-method", 5, 2, &result,
- ADDR("claim"), _prom->mmumap,
+ ADDR("claim"), prom.mmumap,
align, size, virt);
if (ret != 0) {
call_prom("call-method", 4, 1, ADDR("release"),
- _prom->memory, size, virt);
+ prom.memory, size, virt);
return -1;
}
/* the 0x12 is M (coherence) + PP == read/write */
call_prom("call-method", 6, 1,
- ADDR("map"), _prom->mmumap, 0x12, size, virt, virt);
+ ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
return virt;
}
return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
@@ -437,13 +427,10 @@ static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
static void __init __attribute__((noreturn)) prom_panic(const char *reason)
{
-#ifdef CONFIG_PPC64
- reason = PTRRELOC(reason);
-#endif
prom_print(reason);
/* Do not call exit because it clears the screen on pmac
* it also causes some sort of double-fault on early pmacs */
- if (RELOC(of_platform) == PLATFORM_POWERMAC)
+ if (of_platform == PLATFORM_POWERMAC)
asm("trap\n");
/* ToDo: should put up an SRC here on pSeries */
@@ -525,13 +512,13 @@ static int __init prom_setprop(phandle node, const char *nodename,
add_string(&p, tohex((u32)(unsigned long) value));
add_string(&p, tohex(valuelen));
add_string(&p, tohex(ADDR(pname)));
- add_string(&p, tohex(strlen(RELOC(pname))));
+ add_string(&p, tohex(strlen(pname)));
add_string(&p, "property");
*p = 0;
return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
}
-/* We can't use the standard versions because of RELOC headaches. */
+/* We can't use the standard versions because of relocation headaches. */
#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
|| ('a' <= (c) && (c) <= 'f') \
|| ('A' <= (c) && (c) <= 'F'))
@@ -598,59 +585,53 @@ unsigned long prom_memparse(const char *ptr, const char **retptr)
*/
static void __init early_cmdline_parse(void)
{
- struct prom_t *_prom = &RELOC(prom);
const char *opt;
char *p;
int l = 0;
- RELOC(prom_cmd_line[0]) = 0;
- p = RELOC(prom_cmd_line);
- if ((long)_prom->chosen > 0)
- l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
+ prom_cmd_line[0] = 0;
+ p = prom_cmd_line;
+ if ((long)prom.chosen > 0)
+ l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
#ifdef CONFIG_CMDLINE
if (l <= 0 || p[0] == '\0') /* dbl check */
- strlcpy(RELOC(prom_cmd_line),
- RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
+ strlcpy(prom_cmd_line,
+ CONFIG_CMDLINE, sizeof(prom_cmd_line));
#endif /* CONFIG_CMDLINE */
- prom_printf("command line: %s\n", RELOC(prom_cmd_line));
+ prom_printf("command line: %s\n", prom_cmd_line);
#ifdef CONFIG_PPC64
- opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
+ opt = strstr(prom_cmd_line, "iommu=");
if (opt) {
prom_printf("iommu opt is: %s\n", opt);
opt += 6;
while (*opt && *opt == ' ')
opt++;
- if (!strncmp(opt, RELOC("off"), 3))
- RELOC(prom_iommu_off) = 1;
- else if (!strncmp(opt, RELOC("force"), 5))
- RELOC(prom_iommu_force_on) = 1;
+ if (!strncmp(opt, "off", 3))
+ prom_iommu_off = 1;
+ else if (!strncmp(opt, "force", 5))
+ prom_iommu_force_on = 1;
}
#endif
- opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
+ opt = strstr(prom_cmd_line, "mem=");
if (opt) {
opt += 4;
- RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
+ prom_memory_limit = prom_memparse(opt, (const char **)&opt);
#ifdef CONFIG_PPC64
/* Align to 16 MB == size of ppc64 large page */
- RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
+ prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
#endif
}
}
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
- * There are two methods for telling firmware what our capabilities are.
- * Newer machines have an "ibm,client-architecture-support" method on the
- * root node. For older machines, we have to call the "process-elf-header"
- * method in the /packages/elf-loader node, passing it a fake 32-bit
- * ELF header containing a couple of PT_NOTE sections that contain
- * structures that contain various information.
- */
-
-/*
- * New method - extensible architecture description vector.
+ * The architecture vector has an array of PVR mask/value pairs,
+ * followed by # option vectors - 1, followed by the option vectors.
+ *
+ * See prom.h for the definition of the bits specified in the
+ * architecture vector.
*
* Because the description vector contains a mix of byte and word
* values, we declare it as an unsigned char array, and use this
@@ -659,65 +640,7 @@ static void __init early_cmdline_parse(void)
#define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
((x) >> 8) & 0xff, (x) & 0xff
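For reference, a stand-alone sketch (illustrative only, not part of the patch) of what W() produces for the first PVR mask in the vector below:

#include <stdio.h>

#define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, ((x) >> 8) & 0xff, (x) & 0xff

int main(void)
{
	/* The POWER5/POWER5+ PVR mask used below. */
	unsigned char vec[] = { W(0xfffe0000) };

	/* Prints "ff fe 00 00": one 32-bit word, most-significant byte first. */
	printf("%02x %02x %02x %02x\n", vec[0], vec[1], vec[2], vec[3]);
	return 0;
}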
-/* Option vector bits - generic bits in byte 1 */
-#define OV_IGNORE 0x80 /* ignore this vector */
-#define OV_CESSATION_POLICY 0x40 /* halt if unsupported option present*/
-
-/* Option vector 1: processor architectures supported */
-#define OV1_PPC_2_00 0x80 /* set if we support PowerPC 2.00 */
-#define OV1_PPC_2_01 0x40 /* set if we support PowerPC 2.01 */
-#define OV1_PPC_2_02 0x20 /* set if we support PowerPC 2.02 */
-#define OV1_PPC_2_03 0x10 /* set if we support PowerPC 2.03 */
-#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */
-#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */
-#define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */
-#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */
-
-/* Option vector 2: Open Firmware options supported */
-#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
-
-/* Option vector 3: processor options supported */
-#define OV3_FP 0x80 /* floating point */
-#define OV3_VMX 0x40 /* VMX/Altivec */
-#define OV3_DFP 0x20 /* decimal FP */
-
-/* Option vector 4: IBM PAPR implementation */
-#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */
-
-/* Option vector 5: PAPR/OF options supported */
-#define OV5_LPAR 0x80 /* logical partitioning supported */
-#define OV5_SPLPAR 0x40 /* shared-processor LPAR supported */
-/* ibm,dynamic-reconfiguration-memory property supported */
-#define OV5_DRCONF_MEMORY 0x20
-#define OV5_LARGE_PAGES 0x10 /* large pages supported */
-#define OV5_DONATE_DEDICATE_CPU 0x02 /* donate dedicated CPU support */
-/* PCIe/MSI support. Without MSI full PCIe is not supported */
-#ifdef CONFIG_PCI_MSI
-#define OV5_MSI 0x01 /* PCIe/MSI support */
-#else
-#define OV5_MSI 0x00
-#endif /* CONFIG_PCI_MSI */
-#ifdef CONFIG_PPC_SMLPAR
-#define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */
-#define OV5_XCMO 0x40 /* Page Coalescing */
-#else
-#define OV5_CMO 0x00
-#define OV5_XCMO 0x00
-#endif
-#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
-#define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */
-#define OV5_PFO_HW_842 0x40 /* PFO Compression Accelerator */
-#define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */
-#define OV5_SUB_PROCESSORS 0x01 /* 1,2,or 4 Sub-Processors supported */
-
-/* Option Vector 6: IBM PAPR hints */
-#define OV6_LINUX 0x02 /* Linux is our OS */
-
-/*
- * The architecture vector has an array of PVR mask/value pairs,
- * followed by # option vectors - 1, followed by the option vectors.
- */
-static unsigned char ibm_architecture_vec[] = {
+unsigned char ibm_architecture_vec[] = {
W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
W(0xffff0000), W(0x003e0000), /* POWER6 */
W(0xffff0000), W(0x003f0000), /* POWER7 */
@@ -761,11 +684,21 @@ static unsigned char ibm_architecture_vec[] = {
/* option vector 5: PAPR/OF options */
19 - 2, /* length */
0, /* don't ignore, don't halt */
- OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
- OV5_DONATE_DEDICATE_CPU | OV5_MSI,
+ OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
+ OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
+#ifdef CONFIG_PCI_MSI
+ /* PCIe/MSI support. Without MSI full PCIe is not supported */
+ OV5_FEAT(OV5_MSI),
+#else
0,
- OV5_CMO | OV5_XCMO,
- OV5_TYPE1_AFFINITY,
+#endif
+ 0,
+#ifdef CONFIG_PPC_SMLPAR
+ OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
+#else
+ 0,
+#endif
+ OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
0,
0,
0,
@@ -779,8 +712,9 @@ static unsigned char ibm_architecture_vec[] = {
0,
0,
0,
- OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR | OV5_PFO_HW_842,
- OV5_SUB_PROCESSORS,
+ OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
+ OV5_FEAT(OV5_PFO_HW_842),
+ OV5_FEAT(OV5_SUB_PROCESSORS),
/* option vector 6: IBM PAPR hints */
4 - 2, /* length */
0,
@@ -887,7 +821,7 @@ static int __init prom_count_smt_threads(void)
type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, RELOC("cpu")))
+ if (strcmp(type, "cpu"))
continue;
/*
* There is an entry for each smt thread, each entry being
@@ -929,7 +863,7 @@ static void __init prom_send_capabilities(void)
* (we assume this is the same for all cores) and use it to
* divide NR_CPUS.
*/
- cores = (u32 *)PTRRELOC(&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]);
+ cores = (u32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
if (*cores != NR_CPUS) {
prom_printf("WARNING ! "
"ibm_architecture_vec structure inconsistent: %lu!\n",
@@ -1005,21 +939,21 @@ static void __init prom_send_capabilities(void)
*/
static unsigned long __init alloc_up(unsigned long size, unsigned long align)
{
- unsigned long base = RELOC(alloc_bottom);
+ unsigned long base = alloc_bottom;
unsigned long addr = 0;
if (align)
base = _ALIGN_UP(base, align);
prom_debug("alloc_up(%x, %x)\n", size, align);
- if (RELOC(ram_top) == 0)
+ if (ram_top == 0)
prom_panic("alloc_up() called with mem not initialized\n");
if (align)
- base = _ALIGN_UP(RELOC(alloc_bottom), align);
+ base = _ALIGN_UP(alloc_bottom, align);
else
- base = RELOC(alloc_bottom);
+ base = alloc_bottom;
- for(; (base + size) <= RELOC(alloc_top);
+ for(; (base + size) <= alloc_top;
base = _ALIGN_UP(base + 0x100000, align)) {
prom_debug(" trying: 0x%x\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
@@ -1031,14 +965,14 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
}
if (addr == 0)
return 0;
- RELOC(alloc_bottom) = addr + size;
+ alloc_bottom = addr + size;
prom_debug(" -> %x\n", addr);
- prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
- prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
- prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
- prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
- prom_debug(" ram_top : %x\n", RELOC(ram_top));
+ prom_debug(" alloc_bottom : %x\n", alloc_bottom);
+ prom_debug(" alloc_top : %x\n", alloc_top);
+ prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
+ prom_debug(" rmo_top : %x\n", rmo_top);
+ prom_debug(" ram_top : %x\n", ram_top);
return addr;
}
@@ -1054,32 +988,32 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
unsigned long base, addr = 0;
prom_debug("alloc_down(%x, %x, %s)\n", size, align,
- highmem ? RELOC("(high)") : RELOC("(low)"));
- if (RELOC(ram_top) == 0)
+ highmem ? "(high)" : "(low)");
+ if (ram_top == 0)
prom_panic("alloc_down() called with mem not initialized\n");
if (highmem) {
/* Carve out storage for the TCE table. */
- addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
- if (addr <= RELOC(alloc_bottom))
+ addr = _ALIGN_DOWN(alloc_top_high - size, align);
+ if (addr <= alloc_bottom)
return 0;
/* Will we bump into the RMO ? If yes, check out that we
* didn't overlap existing allocations there, if we did,
* we are dead, we must be the first in town !
*/
- if (addr < RELOC(rmo_top)) {
+ if (addr < rmo_top) {
/* Good, we are first */
- if (RELOC(alloc_top) == RELOC(rmo_top))
- RELOC(alloc_top) = RELOC(rmo_top) = addr;
+ if (alloc_top == rmo_top)
+ alloc_top = rmo_top = addr;
else
return 0;
}
- RELOC(alloc_top_high) = addr;
+ alloc_top_high = addr;
goto bail;
}
- base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
- for (; base > RELOC(alloc_bottom);
+ base = _ALIGN_DOWN(alloc_top - size, align);
+ for (; base > alloc_bottom;
base = _ALIGN_DOWN(base - 0x100000, align)) {
prom_debug(" trying: 0x%x\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
@@ -1089,15 +1023,15 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
}
if (addr == 0)
return 0;
- RELOC(alloc_top) = addr;
+ alloc_top = addr;
bail:
prom_debug(" -> %x\n", addr);
- prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
- prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
- prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
- prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
- prom_debug(" ram_top : %x\n", RELOC(ram_top));
+ prom_debug(" alloc_bottom : %x\n", alloc_bottom);
+ prom_debug(" alloc_top : %x\n", alloc_top);
+ prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
+ prom_debug(" rmo_top : %x\n", rmo_top);
+ prom_debug(" ram_top : %x\n", ram_top);
return addr;
}
@@ -1137,7 +1071,7 @@ static unsigned long __init prom_next_cell(int s, cell_t **cellp)
static void __init reserve_mem(u64 base, u64 size)
{
u64 top = base + size;
- unsigned long cnt = RELOC(mem_reserve_cnt);
+ unsigned long cnt = mem_reserve_cnt;
if (size == 0)
return;
@@ -1152,9 +1086,9 @@ static void __init reserve_mem(u64 base, u64 size)
if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
prom_panic("Memory reserve map exhausted !\n");
- RELOC(mem_reserve_map)[cnt].base = base;
- RELOC(mem_reserve_map)[cnt].size = size;
- RELOC(mem_reserve_cnt) = cnt + 1;
+ mem_reserve_map[cnt].base = base;
+ mem_reserve_map[cnt].size = size;
+ mem_reserve_cnt = cnt + 1;
}
/*
@@ -1167,7 +1101,6 @@ static void __init prom_init_mem(void)
char *path, type[64];
unsigned int plen;
cell_t *p, *endp;
- struct prom_t *_prom = &RELOC(prom);
u32 rac, rsc;
/*
@@ -1176,14 +1109,14 @@ static void __init prom_init_mem(void)
* 2) top of memory
*/
rac = 2;
- prom_getprop(_prom->root, "#address-cells", &rac, sizeof(rac));
+ prom_getprop(prom.root, "#address-cells", &rac, sizeof(rac));
rsc = 1;
- prom_getprop(_prom->root, "#size-cells", &rsc, sizeof(rsc));
+ prom_getprop(prom.root, "#size-cells", &rsc, sizeof(rsc));
prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
prom_debug("root_size_cells: %x\n", (unsigned long) rsc);
prom_debug("scanning memory:\n");
- path = RELOC(prom_scratch);
+ path = prom_scratch;
for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
@@ -1196,15 +1129,15 @@ static void __init prom_init_mem(void)
*/
prom_getprop(node, "name", type, sizeof(type));
}
- if (strcmp(type, RELOC("memory")))
+ if (strcmp(type, "memory"))
continue;
- plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
+ plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
if (plen > sizeof(regbuf)) {
prom_printf("memory node too large for buffer !\n");
plen = sizeof(regbuf);
}
- p = RELOC(regbuf);
+ p = regbuf;
endp = p + (plen / sizeof(cell_t));
#ifdef DEBUG_PROM
@@ -1222,14 +1155,14 @@ static void __init prom_init_mem(void)
if (size == 0)
continue;
prom_debug(" %x %x\n", base, size);
- if (base == 0 && (RELOC(of_platform) & PLATFORM_LPAR))
- RELOC(rmo_top) = size;
- if ((base + size) > RELOC(ram_top))
- RELOC(ram_top) = base + size;
+ if (base == 0 && (of_platform & PLATFORM_LPAR))
+ rmo_top = size;
+ if ((base + size) > ram_top)
+ ram_top = base + size;
}
}
- RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);
+ alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
/*
* If prom_memory_limit is set we reduce the upper limits *except* for
@@ -1237,20 +1170,20 @@ static void __init prom_init_mem(void)
* TCE's up there.
*/
- RELOC(alloc_top_high) = RELOC(ram_top);
+ alloc_top_high = ram_top;
- if (RELOC(prom_memory_limit)) {
- if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
+ if (prom_memory_limit) {
+ if (prom_memory_limit <= alloc_bottom) {
prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
- RELOC(prom_memory_limit));
- RELOC(prom_memory_limit) = 0;
- } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
+ prom_memory_limit);
+ prom_memory_limit = 0;
+ } else if (prom_memory_limit >= ram_top) {
prom_printf("Ignoring mem=%x >= ram_top.\n",
- RELOC(prom_memory_limit));
- RELOC(prom_memory_limit) = 0;
+ prom_memory_limit);
+ prom_memory_limit = 0;
} else {
- RELOC(ram_top) = RELOC(prom_memory_limit);
- RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
+ ram_top = prom_memory_limit;
+ rmo_top = min(rmo_top, prom_memory_limit);
}
}
@@ -1262,36 +1195,35 @@ static void __init prom_init_mem(void)
* Since 768MB is plenty of room, and we need to cap to something
* reasonable on 32-bit, cap at 768MB on all machines.
*/
- if (!RELOC(rmo_top))
- RELOC(rmo_top) = RELOC(ram_top);
- RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
- RELOC(alloc_top) = RELOC(rmo_top);
- RELOC(alloc_top_high) = RELOC(ram_top);
+ if (!rmo_top)
+ rmo_top = ram_top;
+ rmo_top = min(0x30000000ul, rmo_top);
+ alloc_top = rmo_top;
+ alloc_top_high = ram_top;
/*
* Check if we have an initrd after the kernel but still inside
* the RMO. If we do move our bottom point to after it.
*/
- if (RELOC(prom_initrd_start) &&
- RELOC(prom_initrd_start) < RELOC(rmo_top) &&
- RELOC(prom_initrd_end) > RELOC(alloc_bottom))
- RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
+ if (prom_initrd_start &&
+ prom_initrd_start < rmo_top &&
+ prom_initrd_end > alloc_bottom)
+ alloc_bottom = PAGE_ALIGN(prom_initrd_end);
prom_printf("memory layout at init:\n");
- prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
- prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
- prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
- prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
- prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
- prom_printf(" ram_top : %x\n", RELOC(ram_top));
+ prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
+ prom_printf(" alloc_bottom : %x\n", alloc_bottom);
+ prom_printf(" alloc_top : %x\n", alloc_top);
+ prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
+ prom_printf(" rmo_top : %x\n", rmo_top);
+ prom_printf(" ram_top : %x\n", ram_top);
}
static void __init prom_close_stdin(void)
{
- struct prom_t *_prom = &RELOC(prom);
ihandle val;
- if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0)
+ if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0)
call_prom("close", 1, 0, val);
}
@@ -1332,19 +1264,19 @@ static void __init prom_query_opal(void)
}
prom_printf("Querying for OPAL presence... ");
- rc = opal_query_takeover(&RELOC(prom_opal_size),
- &RELOC(prom_opal_align));
+ rc = opal_query_takeover(&prom_opal_size,
+ &prom_opal_align);
prom_debug("(rc = %ld) ", rc);
if (rc != 0) {
prom_printf("not there.\n");
return;
}
- RELOC(of_platform) = PLATFORM_OPAL;
+ of_platform = PLATFORM_OPAL;
prom_printf(" there !\n");
- prom_debug(" opal_size = 0x%lx\n", RELOC(prom_opal_size));
- prom_debug(" opal_align = 0x%lx\n", RELOC(prom_opal_align));
- if (RELOC(prom_opal_align) < 0x10000)
- RELOC(prom_opal_align) = 0x10000;
+ prom_debug(" opal_size = 0x%lx\n", prom_opal_size);
+ prom_debug(" opal_align = 0x%lx\n", prom_opal_align);
+ if (prom_opal_align < 0x10000)
+ prom_opal_align = 0x10000;
}
static int prom_rtas_call(int token, int nargs, int nret, int *outputs, ...)
@@ -1365,8 +1297,8 @@ static int prom_rtas_call(int token, int nargs, int nret, int *outputs, ...)
for (i = 0; i < nret; ++i)
rtas_args.rets[i] = 0;
- opal_enter_rtas(&rtas_args, RELOC(prom_rtas_data),
- RELOC(prom_rtas_entry));
+ opal_enter_rtas(&rtas_args, prom_rtas_data,
+ prom_rtas_entry);
if (nret > 1 && outputs != NULL)
for (i = 0; i < nret-1; ++i)
@@ -1381,9 +1313,8 @@ static void __init prom_opal_hold_cpus(void)
phandle node;
char type[64];
u32 servers[8];
- struct prom_t *_prom = &RELOC(prom);
- void *entry = (unsigned long *)&RELOC(opal_secondary_entry);
- struct opal_secondary_data *data = &RELOC(opal_secondary_data);
+ void *entry = (unsigned long *)&opal_secondary_entry;
+ struct opal_secondary_data *data = &opal_secondary_data;
prom_debug("prom_opal_hold_cpus: start...\n");
prom_debug(" - entry = 0x%x\n", entry);
@@ -1396,12 +1327,12 @@ static void __init prom_opal_hold_cpus(void)
for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, RELOC("cpu")) != 0)
+ if (strcmp(type, "cpu") != 0)
continue;
/* Skip non-configured cpus. */
if (prom_getprop(node, "status", type, sizeof(type)) > 0)
- if (strcmp(type, RELOC("okay")) != 0)
+ if (strcmp(type, "okay") != 0)
continue;
cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
@@ -1412,7 +1343,7 @@ static void __init prom_opal_hold_cpus(void)
for (i = 0; i < cnt; i++) {
cpu = servers[i];
prom_debug("CPU %d ... ", cpu);
- if (cpu == _prom->cpu) {
+ if (cpu == prom.cpu) {
prom_debug("booted !\n");
continue;
}
@@ -1423,7 +1354,7 @@ static void __init prom_opal_hold_cpus(void)
* spinloop.
*/
data->ack = -1;
- rc = prom_rtas_call(RELOC(prom_rtas_start_cpu), 3, 1,
+ rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
NULL, cpu, entry, data);
prom_debug("rtas rc=%d ...", rc);
@@ -1443,21 +1374,21 @@ static void __init prom_opal_hold_cpus(void)
static void __init prom_opal_takeover(void)
{
- struct opal_secondary_data *data = &RELOC(opal_secondary_data);
+ struct opal_secondary_data *data = &opal_secondary_data;
struct opal_takeover_args *args = &data->args;
- u64 align = RELOC(prom_opal_align);
+ u64 align = prom_opal_align;
u64 top_addr, opal_addr;
- args->k_image = (u64)RELOC(_stext);
+ args->k_image = (u64)_stext;
args->k_size = _end - _stext;
args->k_entry = 0;
args->k_entry2 = 0x60;
top_addr = _ALIGN_UP(args->k_size, align);
- if (RELOC(prom_initrd_start) != 0) {
- args->rd_image = RELOC(prom_initrd_start);
- args->rd_size = RELOC(prom_initrd_end) - args->rd_image;
+ if (prom_initrd_start != 0) {
+ args->rd_image = prom_initrd_start;
+ args->rd_size = prom_initrd_end - args->rd_image;
args->rd_loc = top_addr;
top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
}
@@ -1469,13 +1400,13 @@ static void __init prom_opal_takeover(void)
* has plenty of memory, and we ask for the HAL for now to
* be just below the 1G point, or above the initrd
*/
- opal_addr = _ALIGN_DOWN(0x40000000 - RELOC(prom_opal_size), align);
+ opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
if (opal_addr < top_addr)
opal_addr = top_addr;
args->hal_addr = opal_addr;
/* Copy the command line to the kernel image */
- strlcpy(RELOC(boot_command_line), RELOC(prom_cmd_line),
+ strlcpy(boot_command_line, prom_cmd_line,
COMMAND_LINE_SIZE);
prom_debug(" k_image = 0x%lx\n", args->k_image);
@@ -1557,8 +1488,8 @@ static void __init prom_instantiate_opal(void)
&entry, sizeof(entry));
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
- RELOC(prom_opal_base) = base;
- RELOC(prom_opal_entry) = entry;
+ prom_opal_base = base;
+ prom_opal_entry = entry;
#endif
prom_debug("prom_instantiate_opal: end...\n");
}
@@ -1616,9 +1547,9 @@ static void __init prom_instantiate_rtas(void)
#ifdef CONFIG_PPC_POWERNV
/* PowerNV takeover hack */
- RELOC(prom_rtas_data) = base;
- RELOC(prom_rtas_entry) = entry;
- prom_getprop(rtas_node, "start-cpu", &RELOC(prom_rtas_start_cpu), 4);
+ prom_rtas_data = base;
+ prom_rtas_entry = entry;
+ prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
#endif
prom_debug("rtas base = 0x%x\n", base);
prom_debug("rtas entry = 0x%x\n", entry);
@@ -1693,20 +1624,20 @@ static void __init prom_initialize_tce_table(void)
phandle node;
ihandle phb_node;
char compatible[64], type[64], model[64];
- char *path = RELOC(prom_scratch);
+ char *path = prom_scratch;
u64 base, align;
u32 minalign, minsize;
u64 tce_entry, *tce_entryp;
u64 local_alloc_top, local_alloc_bottom;
u64 i;
- if (RELOC(prom_iommu_off))
+ if (prom_iommu_off)
return;
prom_debug("starting prom_initialize_tce_table\n");
/* Cache current top of allocs so we reserve a single block */
- local_alloc_top = RELOC(alloc_top_high);
+ local_alloc_top = alloc_top_high;
local_alloc_bottom = local_alloc_top;
/* Search all nodes looking for PHBs. */
@@ -1719,19 +1650,19 @@ static void __init prom_initialize_tce_table(void)
prom_getprop(node, "device_type", type, sizeof(type));
prom_getprop(node, "model", model, sizeof(model));
- if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
+ if ((type[0] == 0) || (strstr(type, "pci") == NULL))
continue;
/* Keep the old logic intact to avoid regression. */
if (compatible[0] != 0) {
- if ((strstr(compatible, RELOC("python")) == NULL) &&
- (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
- (strstr(compatible, RELOC("Winnipeg")) == NULL))
+ if ((strstr(compatible, "python") == NULL) &&
+ (strstr(compatible, "Speedwagon") == NULL) &&
+ (strstr(compatible, "Winnipeg") == NULL))
continue;
} else if (model[0] != 0) {
- if ((strstr(model, RELOC("ython")) == NULL) &&
- (strstr(model, RELOC("peedwagon")) == NULL) &&
- (strstr(model, RELOC("innipeg")) == NULL))
+ if ((strstr(model, "ython") == NULL) &&
+ (strstr(model, "peedwagon") == NULL) &&
+ (strstr(model, "innipeg") == NULL))
continue;
}
@@ -1810,8 +1741,8 @@ static void __init prom_initialize_tce_table(void)
/* These are only really needed if there is a memory limit in
* effect, but we don't know so export them always. */
- RELOC(prom_tce_alloc_start) = local_alloc_bottom;
- RELOC(prom_tce_alloc_end) = local_alloc_top;
+ prom_tce_alloc_start = local_alloc_bottom;
+ prom_tce_alloc_end = local_alloc_top;
/* Flag the first invalid entry */
prom_debug("ending prom_initialize_tce_table\n");
@@ -1848,7 +1779,6 @@ static void __init prom_hold_cpus(void)
unsigned int reg;
phandle node;
char type[64];
- struct prom_t *_prom = &RELOC(prom);
unsigned long *spinloop
= (void *) LOW_ADDR(__secondary_hold_spinloop);
unsigned long *acknowledge
@@ -1874,12 +1804,12 @@ static void __init prom_hold_cpus(void)
for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, RELOC("cpu")) != 0)
+ if (strcmp(type, "cpu") != 0)
continue;
/* Skip non-configured cpus. */
if (prom_getprop(node, "status", type, sizeof(type)) > 0)
- if (strcmp(type, RELOC("okay")) != 0)
+ if (strcmp(type, "okay") != 0)
continue;
reg = -1;
@@ -1893,7 +1823,7 @@ static void __init prom_hold_cpus(void)
*/
*acknowledge = (unsigned long)-1;
- if (reg != _prom->cpu) {
+ if (reg != prom.cpu) {
/* Primary Thread of non-boot cpu or any thread */
prom_printf("starting cpu hw idx %lu... ", reg);
call_prom("start-cpu", 3, 0, node,
@@ -1920,22 +1850,20 @@ static void __init prom_hold_cpus(void)
static void __init prom_init_client_services(unsigned long pp)
{
- struct prom_t *_prom = &RELOC(prom);
-
/* Get a handle to the prom entry point before anything else */
- RELOC(prom_entry) = pp;
+ prom_entry = pp;
/* get a handle for the stdout device */
- _prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
- if (!PHANDLE_VALID(_prom->chosen))
+ prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
+ if (!PHANDLE_VALID(prom.chosen))
prom_panic("cannot find chosen"); /* msg won't be printed :( */
/* get device tree root */
- _prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
- if (!PHANDLE_VALID(_prom->root))
+ prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
+ if (!PHANDLE_VALID(prom.root))
prom_panic("cannot find device tree root"); /* msg won't be printed :( */
- _prom->mmumap = 0;
+ prom.mmumap = 0;
}
#ifdef CONFIG_PPC32
@@ -1946,7 +1874,6 @@ static void __init prom_init_client_services(unsigned long pp)
*/
static void __init prom_find_mmu(void)
{
- struct prom_t *_prom = &RELOC(prom);
phandle oprom;
char version[64];
@@ -1964,10 +1891,10 @@ static void __init prom_find_mmu(void)
call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
} else
return;
- _prom->memory = call_prom("open", 1, 1, ADDR("/memory"));
- prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
- sizeof(_prom->mmumap));
- if (!IHANDLE_VALID(_prom->memory) || !IHANDLE_VALID(_prom->mmumap))
+ prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
+ prom_getprop(prom.chosen, "mmu", &prom.mmumap,
+ sizeof(prom.mmumap));
+ if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
}
#else
@@ -1976,36 +1903,34 @@ static void __init prom_find_mmu(void)
static void __init prom_init_stdout(void)
{
- struct prom_t *_prom = &RELOC(prom);
- char *path = RELOC(of_stdout_device);
+ char *path = of_stdout_device;
char type[16];
u32 val;
- if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
+ if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
prom_panic("cannot find stdout");
- _prom->stdout = val;
+ prom.stdout = val;
/* Get the full OF pathname of the stdout device */
memset(path, 0, 256);
- call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
- val = call_prom("instance-to-package", 1, 1, _prom->stdout);
- prom_setprop(_prom->chosen, "/chosen", "linux,stdout-package",
+ call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
+ val = call_prom("instance-to-package", 1, 1, prom.stdout);
+ prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
&val, sizeof(val));
- prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
- prom_setprop(_prom->chosen, "/chosen", "linux,stdout-path",
+ prom_printf("OF stdout device is: %s\n", of_stdout_device);
+ prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
path, strlen(path) + 1);
/* If it's a display, note it */
memset(type, 0, sizeof(type));
prom_getprop(val, "device_type", type, sizeof(type));
- if (strcmp(type, RELOC("display")) == 0)
+ if (strcmp(type, "display") == 0)
prom_setprop(val, path, "linux,boot-display", NULL, 0);
}
static int __init prom_find_machine_type(void)
{
- struct prom_t *_prom = &RELOC(prom);
char compat[256];
int len, i = 0;
#ifdef CONFIG_PPC64
@@ -2014,7 +1939,7 @@ static int __init prom_find_machine_type(void)
#endif
/* Look for a PowerMac or a Cell */
- len = prom_getprop(_prom->root, "compatible",
+ len = prom_getprop(prom.root, "compatible",
compat, sizeof(compat)-1);
if (len > 0) {
compat[len] = 0;
@@ -2023,16 +1948,16 @@ static int __init prom_find_machine_type(void)
int sl = strlen(p);
if (sl == 0)
break;
- if (strstr(p, RELOC("Power Macintosh")) ||
- strstr(p, RELOC("MacRISC")))
+ if (strstr(p, "Power Macintosh") ||
+ strstr(p, "MacRISC"))
return PLATFORM_POWERMAC;
#ifdef CONFIG_PPC64
/* We must make sure we don't detect the IBM Cell
* blades as pSeries due to some firmware issues,
* so we do it here.
*/
- if (strstr(p, RELOC("IBM,CBEA")) ||
- strstr(p, RELOC("IBM,CPBW-1.0")))
+ if (strstr(p, "IBM,CBEA") ||
+ strstr(p, "IBM,CPBW-1.0"))
return PLATFORM_GENERIC;
#endif /* CONFIG_PPC64 */
i += sl + 1;
@@ -2049,11 +1974,11 @@ static int __init prom_find_machine_type(void)
* non-IBM designs !
* - it has /rtas
*/
- len = prom_getprop(_prom->root, "device_type",
+ len = prom_getprop(prom.root, "device_type",
compat, sizeof(compat)-1);
if (len <= 0)
return PLATFORM_GENERIC;
- if (strcmp(compat, RELOC("chrp")))
+ if (strcmp(compat, "chrp"))
return PLATFORM_GENERIC;
/* Default to pSeries. We need to know if we are running LPAR */
@@ -2115,11 +2040,11 @@ static void __init prom_check_displays(void)
for (node = 0; prom_next_node(&node); ) {
memset(type, 0, sizeof(type));
prom_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, RELOC("display")) != 0)
+ if (strcmp(type, "display") != 0)
continue;
/* It seems OF doesn't null-terminate the path :-( */
- path = RELOC(prom_scratch);
+ path = prom_scratch;
memset(path, 0, PROM_SCRATCH_SIZE);
/*
@@ -2143,15 +2068,15 @@ static void __init prom_check_displays(void)
/* Setup a usable color table when the appropriate
* method is available. Should update this to set-colors */
- clut = RELOC(default_colors);
+ clut = default_colors;
for (i = 0; i < 16; i++, clut += 3)
if (prom_set_color(ih, i, clut[0], clut[1],
clut[2]) != 0)
break;
#ifdef CONFIG_LOGO_LINUX_CLUT224
- clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
- for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
+ clut = PTRRELOC(logo_linux_clut224.clut);
+ for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
if (prom_set_color(ih, i + 32, clut[0], clut[1],
clut[2]) != 0)
break;
@@ -2171,8 +2096,8 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
unsigned long room, chunk;
prom_debug("Chunk exhausted, claiming more at %x...\n",
- RELOC(alloc_bottom));
- room = RELOC(alloc_top) - RELOC(alloc_bottom);
+ alloc_bottom);
+ room = alloc_top - alloc_bottom;
if (room > DEVTREE_CHUNK_SIZE)
room = DEVTREE_CHUNK_SIZE;
if (room < PAGE_SIZE)
@@ -2198,9 +2123,9 @@ static unsigned long __init dt_find_string(char *str)
{
char *s, *os;
- s = os = (char *)RELOC(dt_string_start);
+ s = os = (char *)dt_string_start;
s += 4;
- while (s < (char *)RELOC(dt_string_end)) {
+ while (s < (char *)dt_string_end) {
if (strcmp(s, str) == 0)
return s - os;
s += strlen(s) + 1;
@@ -2222,10 +2147,10 @@ static void __init scan_dt_build_strings(phandle node,
unsigned long soff;
phandle child;
- sstart = (char *)RELOC(dt_string_start);
+ sstart = (char *)dt_string_start;
/* get and store all property names */
- prev_name = RELOC("");
+ prev_name = "";
for (;;) {
/* 64 is max len of name including nul. */
namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
@@ -2236,9 +2161,9 @@ static void __init scan_dt_build_strings(phandle node,
}
/* skip "name" */
- if (strcmp(namep, RELOC("name")) == 0) {
+ if (strcmp(namep, "name") == 0) {
*mem_start = (unsigned long)namep;
- prev_name = RELOC("name");
+ prev_name = "name";
continue;
}
/* get/create string entry */
@@ -2249,7 +2174,7 @@ static void __init scan_dt_build_strings(phandle node,
} else {
/* Trim off some if we can */
*mem_start = (unsigned long)namep + strlen(namep) + 1;
- RELOC(dt_string_end) = *mem_start;
+ dt_string_end = *mem_start;
}
prev_name = namep;
}
@@ -2304,35 +2229,35 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
}
/* get it again for debugging */
- path = RELOC(prom_scratch);
+ path = prom_scratch;
memset(path, 0, PROM_SCRATCH_SIZE);
call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
/* get and store all properties */
- prev_name = RELOC("");
- sstart = (char *)RELOC(dt_string_start);
+ prev_name = "";
+ sstart = (char *)dt_string_start;
for (;;) {
if (call_prom("nextprop", 3, 1, node, prev_name,
- RELOC(pname)) != 1)
+ pname) != 1)
break;
/* skip "name" */
- if (strcmp(RELOC(pname), RELOC("name")) == 0) {
- prev_name = RELOC("name");
+ if (strcmp(pname, "name") == 0) {
+ prev_name = "name";
continue;
}
/* find string offset */
- soff = dt_find_string(RELOC(pname));
+ soff = dt_find_string(pname);
if (soff == 0) {
prom_printf("WARNING: Can't find string index for"
- " <%s>, node %s\n", RELOC(pname), path);
+ " <%s>, node %s\n", pname, path);
break;
}
prev_name = sstart + soff;
/* get length */
- l = call_prom("getproplen", 2, 1, node, RELOC(pname));
+ l = call_prom("getproplen", 2, 1, node, pname);
/* sanity checks */
if (l == PROM_ERROR)
@@ -2345,10 +2270,10 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
/* push property content */
valp = make_room(mem_start, mem_end, l, 4);
- call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
+ call_prom("getprop", 4, 1, node, pname, valp, l);
*mem_start = _ALIGN(*mem_start, 4);
- if (!strcmp(RELOC(pname), RELOC("phandle")))
+ if (!strcmp(pname, "phandle"))
has_phandle = 1;
}
@@ -2356,7 +2281,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
* existed (can happen with OPAL)
*/
if (!has_phandle) {
- soff = dt_find_string(RELOC("linux,phandle"));
+ soff = dt_find_string("linux,phandle");
if (soff == 0)
prom_printf("WARNING: Can't find string index for"
" <linux-phandle> node %s\n", path);
@@ -2384,7 +2309,6 @@ static void __init flatten_device_tree(void)
phandle root;
unsigned long mem_start, mem_end, room;
struct boot_param_header *hdr;
- struct prom_t *_prom = &RELOC(prom);
char *namep;
u64 *rsvmap;
@@ -2392,10 +2316,10 @@ static void __init flatten_device_tree(void)
* Check how much room we have between alloc top & bottom (+/- a
* few pages), crop to 1MB, as this is our "chunk" size
*/
- room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
+ room = alloc_top - alloc_bottom - 0x4000;
if (room > DEVTREE_CHUNK_SIZE)
room = DEVTREE_CHUNK_SIZE;
- prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));
+ prom_debug("starting device tree allocs at %x\n", alloc_bottom);
/* Now try to claim that */
mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
@@ -2412,66 +2336,66 @@ static void __init flatten_device_tree(void)
mem_start = _ALIGN(mem_start, 4);
hdr = make_room(&mem_start, &mem_end,
sizeof(struct boot_param_header), 4);
- RELOC(dt_header_start) = (unsigned long)hdr;
+ dt_header_start = (unsigned long)hdr;
rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
/* Start of strings */
mem_start = PAGE_ALIGN(mem_start);
- RELOC(dt_string_start) = mem_start;
+ dt_string_start = mem_start;
mem_start += 4; /* hole */
/* Add "linux,phandle" in there, we'll need it */
namep = make_room(&mem_start, &mem_end, 16, 1);
- strcpy(namep, RELOC("linux,phandle"));
+ strcpy(namep, "linux,phandle");
mem_start = (unsigned long)namep + strlen(namep) + 1;
/* Build string array */
prom_printf("Building dt strings...\n");
scan_dt_build_strings(root, &mem_start, &mem_end);
- RELOC(dt_string_end) = mem_start;
+ dt_string_end = mem_start;
/* Build structure */
mem_start = PAGE_ALIGN(mem_start);
- RELOC(dt_struct_start) = mem_start;
+ dt_struct_start = mem_start;
prom_printf("Building dt structure...\n");
scan_dt_build_struct(root, &mem_start, &mem_end);
dt_push_token(OF_DT_END, &mem_start, &mem_end);
- RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);
+ dt_struct_end = PAGE_ALIGN(mem_start);
/* Finish header */
- hdr->boot_cpuid_phys = _prom->cpu;
+ hdr->boot_cpuid_phys = prom.cpu;
hdr->magic = OF_DT_HEADER;
- hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
- hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
- hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
- hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
- hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
+ hdr->totalsize = dt_struct_end - dt_header_start;
+ hdr->off_dt_struct = dt_struct_start - dt_header_start;
+ hdr->off_dt_strings = dt_string_start - dt_header_start;
+ hdr->dt_strings_size = dt_string_end - dt_string_start;
+ hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - dt_header_start;
hdr->version = OF_DT_VERSION;
/* Version 16 is not backward compatible */
hdr->last_comp_version = 0x10;
/* Copy the reserve map in */
- memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
+ memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
#ifdef DEBUG_PROM
{
int i;
prom_printf("reserved memory map:\n");
- for (i = 0; i < RELOC(mem_reserve_cnt); i++)
+ for (i = 0; i < mem_reserve_cnt; i++)
prom_printf(" %x - %x\n",
- RELOC(mem_reserve_map)[i].base,
- RELOC(mem_reserve_map)[i].size);
+ mem_reserve_map[i].base,
+ mem_reserve_map[i].size);
}
#endif
/* Bump mem_reserve_cnt to cause further reservations to fail
* since it's too late.
*/
- RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
+ mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
prom_printf("Device tree strings 0x%x -> 0x%x\n",
- RELOC(dt_string_start), RELOC(dt_string_end));
+ dt_string_start, dt_string_end);
prom_printf("Device tree struct 0x%x -> 0x%x\n",
- RELOC(dt_struct_start), RELOC(dt_struct_end));
+ dt_struct_start, dt_struct_end);
}
@@ -2526,7 +2450,6 @@ static void __init fixup_device_tree_maple_memory_controller(void)
phandle mc;
u32 mc_reg[4];
char *name = "/hostbridge@f8000000";
- struct prom_t *_prom = &RELOC(prom);
u32 ac, sc;
mc = call_prom("finddevice", 1, 1, ADDR(name));
@@ -2536,8 +2459,8 @@ static void __init fixup_device_tree_maple_memory_controller(void)
if (prom_getproplen(mc, "reg") != 8)
return;
- prom_getprop(_prom->root, "#address-cells", &ac, sizeof(ac));
- prom_getprop(_prom->root, "#size-cells", &sc, sizeof(sc));
+ prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
+ prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
if ((ac != 2) || (sc != 2))
return;
@@ -2806,50 +2729,94 @@ static void __init fixup_device_tree(void)
static void __init prom_find_boot_cpu(void)
{
- struct prom_t *_prom = &RELOC(prom);
u32 getprop_rval;
ihandle prom_cpu;
phandle cpu_pkg;
- _prom->cpu = 0;
- if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
+ prom.cpu = 0;
+ if (prom_getprop(prom.chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
return;
cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
- _prom->cpu = getprop_rval;
+ prom.cpu = getprop_rval;
- prom_debug("Booting CPU hw index = %lu\n", _prom->cpu);
+ prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
}
static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
{
#ifdef CONFIG_BLK_DEV_INITRD
- struct prom_t *_prom = &RELOC(prom);
-
if (r3 && r4 && r4 != 0xdeadbeef) {
unsigned long val;
- RELOC(prom_initrd_start) = is_kernel_addr(r3) ? __pa(r3) : r3;
- RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
+ prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
+ prom_initrd_end = prom_initrd_start + r4;
- val = RELOC(prom_initrd_start);
- prom_setprop(_prom->chosen, "/chosen", "linux,initrd-start",
+ val = prom_initrd_start;
+ prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
&val, sizeof(val));
- val = RELOC(prom_initrd_end);
- prom_setprop(_prom->chosen, "/chosen", "linux,initrd-end",
+ val = prom_initrd_end;
+ prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
&val, sizeof(val));
- reserve_mem(RELOC(prom_initrd_start),
- RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
+ reserve_mem(prom_initrd_start,
+ prom_initrd_end - prom_initrd_start);
- prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
- prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
+ prom_debug("initrd_start=0x%x\n", prom_initrd_start);
+ prom_debug("initrd_end=0x%x\n", prom_initrd_end);
}
#endif /* CONFIG_BLK_DEV_INITRD */
}
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_RELOCATABLE
+static void reloc_toc(void)
+{
+}
+
+static void unreloc_toc(void)
+{
+}
+#else
+static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
+{
+ unsigned long i;
+ unsigned long *toc_entry;
+
+ /* Get the start of the TOC by using r2 directly. */
+ asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
+
+ for (i = 0; i < nr_entries; i++) {
+ *toc_entry = *toc_entry + offset;
+ toc_entry++;
+ }
+}
+
+static void reloc_toc(void)
+{
+ unsigned long offset = reloc_offset();
+ unsigned long nr_entries =
+ (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
+
+ __reloc_toc(offset, nr_entries);
+
+ mb();
+}
+
+static void unreloc_toc(void)
+{
+ unsigned long offset = reloc_offset();
+ unsigned long nr_entries =
+ (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
+
+ mb();
+
+ __reloc_toc(-offset, nr_entries);
+}
+#endif
+#endif
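For intuition, a tiny user-space sketch (illustrative only, not part of the patch) of the adjust-then-restore pattern that reloc_toc()/unreloc_toc() apply to prom_init's private TOC entries:

#include <stdio.h>

static void adjust(unsigned long *entry, unsigned long n, unsigned long off)
{
	while (n--)
		*entry++ += off;
}

int main(void)
{
	unsigned long toc[3] = { 0x1000, 0x2000, 0x3000 };
	unsigned long offset = 0x400000;	/* pretend load-time offset */

	adjust(toc, 3, offset);		/* like reloc_toc(): shift entries to the load address */
	printf("0x%lx\n", toc[0]);	/* 0x401000 */
	adjust(toc, 3, -offset);	/* like unreloc_toc(): back to link-time values */
	printf("0x%lx\n", toc[0]);	/* 0x1000 */
	return 0;
}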
/*
* We enter here early on, when the Open Firmware prom is still
@@ -2861,20 +2828,19 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
unsigned long r6, unsigned long r7,
unsigned long kbase)
{
- struct prom_t *_prom;
unsigned long hdr;
#ifdef CONFIG_PPC32
unsigned long offset = reloc_offset();
reloc_got2(offset);
+#else
+ reloc_toc();
#endif
- _prom = &RELOC(prom);
-
/*
* First zero the BSS
*/
- memset(&RELOC(__bss_start), 0, __bss_stop - __bss_start);
+ memset(&__bss_start, 0, __bss_stop - __bss_start);
/*
* Init interface to Open Firmware, get some node references,
@@ -2893,14 +2859,14 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
prom_init_stdout();
- prom_printf("Preparing to boot %s", RELOC(linux_banner));
+ prom_printf("Preparing to boot %s", linux_banner);
/*
* Get default machine type. At this point, we do not differentiate
* between pSeries SMP and pSeries LPAR
*/
- RELOC(of_platform) = prom_find_machine_type();
- prom_printf("Detected machine type: %x\n", RELOC(of_platform));
+ of_platform = prom_find_machine_type();
+ prom_printf("Detected machine type: %x\n", of_platform);
#ifndef CONFIG_NONSTATIC_KERNEL
/* Bail if this is a kdump kernel. */
@@ -2917,15 +2883,15 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/*
* On pSeries, inform the firmware about our capabilities
*/
- if (RELOC(of_platform) == PLATFORM_PSERIES ||
- RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
+ if (of_platform == PLATFORM_PSERIES ||
+ of_platform == PLATFORM_PSERIES_LPAR)
prom_send_capabilities();
#endif
/*
* Copy the CPU hold code
*/
- if (RELOC(of_platform) != PLATFORM_POWERMAC)
+ if (of_platform != PLATFORM_POWERMAC)
copy_and_flush(0, kbase, 0x100, 0);
/*
@@ -2954,7 +2920,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* that uses the allocator, we need to make sure we get the top of memory
* available for us here...
*/
- if (RELOC(of_platform) == PLATFORM_PSERIES)
+ if (of_platform == PLATFORM_PSERIES)
prom_initialize_tce_table();
#endif
@@ -2962,19 +2928,19 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* On non-powermacs, try to instantiate RTAS. PowerMacs don't
* have a usable RTAS implementation.
*/
- if (RELOC(of_platform) != PLATFORM_POWERMAC &&
- RELOC(of_platform) != PLATFORM_OPAL)
+ if (of_platform != PLATFORM_POWERMAC &&
+ of_platform != PLATFORM_OPAL)
prom_instantiate_rtas();
#ifdef CONFIG_PPC_POWERNV
/* Detect HAL and try instantiating it & doing takeover */
- if (RELOC(of_platform) == PLATFORM_PSERIES_LPAR) {
+ if (of_platform == PLATFORM_PSERIES_LPAR) {
prom_query_opal();
- if (RELOC(of_platform) == PLATFORM_OPAL) {
+ if (of_platform == PLATFORM_OPAL) {
prom_opal_hold_cpus();
prom_opal_takeover();
}
- } else if (RELOC(of_platform) == PLATFORM_OPAL)
+ } else if (of_platform == PLATFORM_OPAL)
prom_instantiate_opal();
#endif
@@ -2988,32 +2954,32 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*
* PowerMacs use a different mechanism to spin CPUs
*/
- if (RELOC(of_platform) != PLATFORM_POWERMAC &&
- RELOC(of_platform) != PLATFORM_OPAL)
+ if (of_platform != PLATFORM_POWERMAC &&
+ of_platform != PLATFORM_OPAL)
prom_hold_cpus();
/*
* Fill in some infos for use by the kernel later on
*/
- if (RELOC(prom_memory_limit))
- prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
- &RELOC(prom_memory_limit),
+ if (prom_memory_limit)
+ prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
+ &prom_memory_limit,
sizeof(prom_memory_limit));
#ifdef CONFIG_PPC64
- if (RELOC(prom_iommu_off))
- prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
+ if (prom_iommu_off)
+ prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
NULL, 0);
- if (RELOC(prom_iommu_force_on))
- prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
+ if (prom_iommu_force_on)
+ prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
NULL, 0);
- if (RELOC(prom_tce_alloc_start)) {
- prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-start",
- &RELOC(prom_tce_alloc_start),
+ if (prom_tce_alloc_start) {
+ prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
+ &prom_tce_alloc_start,
sizeof(prom_tce_alloc_start));
- prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-end",
- &RELOC(prom_tce_alloc_end),
+ prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
+ &prom_tce_alloc_end,
sizeof(prom_tce_alloc_end));
}
#endif
@@ -3035,8 +3001,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* closed stdin already (in particular the powerbook 101). It
* appears that the OPAL version of OFW doesn't like it either.
*/
- if (RELOC(of_platform) != PLATFORM_POWERMAC &&
- RELOC(of_platform) != PLATFORM_OPAL)
+ if (of_platform != PLATFORM_POWERMAC &&
+ of_platform != PLATFORM_OPAL)
prom_close_stdin();
/*
@@ -3051,22 +3017,24 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* tree and NULL as r5, thus triggering the new entry point which
* is common to us and kexec
*/
- hdr = RELOC(dt_header_start);
+ hdr = dt_header_start;
/* Don't print anything after quiesce under OPAL, it crashes OFW */
- if (RELOC(of_platform) != PLATFORM_OPAL) {
+ if (of_platform != PLATFORM_OPAL) {
prom_printf("returning from prom_init\n");
prom_debug("->dt_header_start=0x%x\n", hdr);
}
#ifdef CONFIG_PPC32
reloc_got2(-offset);
+#else
+ unreloc_toc();
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
/* OPAL early debug gets the OPAL base & entry in r8 and r9 */
__start(hdr, kbase, 0, 0, 0,
- RELOC(prom_opal_base), RELOC(prom_opal_entry));
+ prom_opal_base, prom_opal_entry);
#else
__start(hdr, kbase, 0, 0, 0, 0, 0);
#endif
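
For context on the TOC handling added above: when a non-relocatable 64-bit kernel is loaded away from its link address, prom_init() cannot trust its TOC pointers, so __reloc_toc() walks the prom_init-local TOC entries and biases each one by the load offset, and unreloc_toc() removes the bias again before returning. A minimal stand-alone sketch of that pointer-table fixup, with hypothetical names and none of the kernel specifics:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for a TOC: a table of absolute addresses. */
static uintptr_t toc_table[] = { 0x1000, 0x2000, 0x3000 };

static void reloc_table(uintptr_t *table, size_t nr_entries, intptr_t offset)
{
	size_t i;

	/* Bias every entry by the load offset, as __reloc_toc() does. */
	for (i = 0; i < nr_entries; i++)
		table[i] += offset;
}

int main(void)
{
	size_t n = sizeof(toc_table) / sizeof(toc_table[0]);

	reloc_table(toc_table, n, 0x100000);	/* reloc_toc() */
	printf("relocated first entry: 0x%lx\n", (unsigned long)toc_table[0]);

	reloc_table(toc_table, n, -0x100000);	/* unreloc_toc() */
	printf("restored first entry:  0x%lx\n", (unsigned long)toc_table[0]);
	return 0;
}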
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 70f4286..3765da6 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -22,7 +22,7 @@ __secondary_hold_acknowledge __secondary_hold_spinloop __start
strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
reloc_got2 kernstart_addr memstart_addr linux_banner _stext
opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry
-boot_command_line"
+boot_command_line __prom_init_toc_start __prom_init_toc_end"
NM="$1"
OBJ="$2"
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index c497000..98c2fc1 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -32,6 +32,7 @@
#include <trace/syscall.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
+#include <linux/context_tracking.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -179,6 +180,31 @@ static int set_user_msr(struct task_struct *task, unsigned long msr)
return 0;
}
+#ifdef CONFIG_PPC64
+static int get_user_dscr(struct task_struct *task, unsigned long *data)
+{
+ *data = task->thread.dscr;
+ return 0;
+}
+
+static int set_user_dscr(struct task_struct *task, unsigned long dscr)
+{
+ task->thread.dscr = dscr;
+ task->thread.dscr_inherit = 1;
+ return 0;
+}
+#else
+static int get_user_dscr(struct task_struct *task, unsigned long *data)
+{
+ return -EIO;
+}
+
+static int set_user_dscr(struct task_struct *task, unsigned long dscr)
+{
+ return -EIO;
+}
+#endif
+
/*
* We prevent mucking around with the reserved area of trap
* which are used internally by the kernel.
@@ -192,16 +218,23 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
/*
* Get contents of register REGNO in task TASK.
*/
-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
+int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
- if (task->thread.regs == NULL)
+ if ((task->thread.regs == NULL) || !data)
return -EIO;
- if (regno == PT_MSR)
- return get_user_msr(task);
+ if (regno == PT_MSR) {
+ *data = get_user_msr(task);
+ return 0;
+ }
- if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
- return ((unsigned long *)task->thread.regs)[regno];
+ if (regno == PT_DSCR)
+ return get_user_dscr(task, data);
+
+ if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
+ *data = ((unsigned long *)task->thread.regs)[regno];
+ return 0;
+ }
return -EIO;
}
@@ -218,6 +251,8 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
return set_user_msr(task, data);
if (regno == PT_TRAP)
return set_user_trap(task, data);
+ if (regno == PT_DSCR)
+ return set_user_dscr(task, data);
if (regno <= PT_MAX_PUT_REG) {
((unsigned long *)task->thread.regs)[regno] = data;
@@ -905,6 +940,9 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
struct perf_event *bp;
struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#ifndef CONFIG_PPC_ADV_DEBUG_REGS
+ struct arch_hw_breakpoint hw_brk;
+#endif
/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
* For embedded processors we support one DAC and no IAC's at the
@@ -931,14 +969,17 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
*/
/* Ensure breakpoint translation bit is set */
- if (data && !(data & DABR_TRANSLATION))
+ if (data && !(data & HW_BRK_TYPE_TRANSLATE))
return -EIO;
+ hw_brk.address = data & (~HW_BRK_TYPE_DABR);
+ hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
+ hw_brk.len = 8;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
if (ptrace_get_breakpoints(task) < 0)
return -ESRCH;
bp = thread->ptrace_bps[0];
- if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
+ if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
if (bp) {
unregister_hw_breakpoint(bp);
thread->ptrace_bps[0] = NULL;
@@ -948,10 +989,8 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
}
if (bp) {
attr = bp->attr;
- attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
- arch_bp_generic_fields(data &
- (DABR_DATA_WRITE | DABR_DATA_READ),
- &attr.bp_type);
+ attr.bp_addr = hw_brk.address;
+ arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
/* Enable breakpoint */
attr.disabled = false;
@@ -963,16 +1002,15 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
}
thread->ptrace_bps[0] = bp;
ptrace_put_breakpoints(task);
- thread->dabr = data;
- thread->dabrx = DABRX_ALL;
+ thread->hw_brk = hw_brk;
return 0;
}
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
- attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
- arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ),
- &attr.bp_type);
+ attr.bp_addr = hw_brk.address;
+ arch_bp_generic_fields(hw_brk.type,
+ &attr.bp_type);
thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
ptrace_triggered, NULL, task);
@@ -985,10 +1023,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
ptrace_put_breakpoints(task);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-
- /* Move contents to the DABR register */
- task->thread.dabr = data;
- task->thread.dabrx = DABRX_ALL;
+ task->thread.hw_brk = hw_brk;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
/* As described above, it was assumed 3 bits were passed with the data
* address, but we will assume only the mode bits will be passed
@@ -1349,7 +1384,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
- unsigned long dabr;
+ struct arch_hw_breakpoint brk;
#endif
if (bp_info->version != 1)
@@ -1397,12 +1432,13 @@ static long ppc_set_hwdebug(struct task_struct *child,
if ((unsigned long)bp_info->addr >= TASK_SIZE)
return -EIO;
- dabr = (unsigned long)bp_info->addr & ~7UL;
- dabr |= DABR_TRANSLATION;
+ brk.address = bp_info->addr & ~7UL;
+ brk.type = HW_BRK_TYPE_TRANSLATE;
+ brk.len = 8;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
- dabr |= DABR_DATA_READ;
+ brk.type |= HW_BRK_TYPE_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
- dabr |= DABR_DATA_WRITE;
+ brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
if (ptrace_get_breakpoints(child) < 0)
return -ESRCH;
@@ -1427,8 +1463,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
hw_breakpoint_init(&attr);
attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
attr.bp_len = len;
- arch_bp_generic_fields(dabr & (DABR_DATA_WRITE | DABR_DATA_READ),
- &attr.bp_type);
+ arch_bp_generic_fields(brk.type, &attr.bp_type);
thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
ptrace_triggered, NULL, child);
@@ -1445,11 +1480,10 @@ static long ppc_set_hwdebug(struct task_struct *child,
if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
return -EINVAL;
- if (child->thread.dabr)
+ if (child->thread.hw_brk.address)
return -ENOSPC;
- child->thread.dabr = dabr;
- child->thread.dabrx = DABRX_ALL;
+ child->thread.hw_brk = brk;
return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
@@ -1495,10 +1529,11 @@ static long ppc_del_hwdebug(struct task_struct *child, long data)
ptrace_put_breakpoints(child);
return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
- if (child->thread.dabr == 0)
+ if (child->thread.hw_brk.address == 0)
return -ENOENT;
- child->thread.dabr = 0;
+ child->thread.hw_brk.address = 0;
+ child->thread.hw_brk.type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
return 0;
@@ -1531,7 +1566,9 @@ long arch_ptrace(struct task_struct *child, long request,
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
- tmp = ptrace_get_reg(child, (int) index);
+ ret = ptrace_get_reg(child, (int) index, &tmp);
+ if (ret)
+ break;
} else {
unsigned int fpidx = index - PT_FPR0;
@@ -1608,6 +1645,8 @@ long arch_ptrace(struct task_struct *child, long request,
dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
+ if (cpu_has_feature(CPU_FTR_DAWR))
+ dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
dbginfo.features = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
@@ -1642,6 +1681,9 @@ long arch_ptrace(struct task_struct *child, long request,
}
case PTRACE_GET_DEBUGREG: {
+#ifndef CONFIG_PPC_ADV_DEBUG_REGS
+ unsigned long dabr_fake;
+#endif
ret = -EINVAL;
/* We only support one DABR and no IABRS at the moment */
if (addr > 0)
@@ -1649,7 +1691,9 @@ long arch_ptrace(struct task_struct *child, long request,
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
ret = put_user(child->thread.dac1, datalp);
#else
- ret = put_user(child->thread.dabr, datalp);
+ dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
+ (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
+ ret = put_user(dabr_fake, datalp);
#endif
break;
}
@@ -1745,6 +1789,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
+ user_exit();
+
secure_computing_strict(regs->gpr[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
@@ -1789,4 +1835,6 @@ void do_syscall_trace_leave(struct pt_regs *regs)
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
+
+ user_enter();
}
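
The ptrace_get_reg() change above is the usual cure for an API whose return value doubles as the payload: a register could legitimately hold the bit pattern of -EIO, so the value now travels through an out-parameter and the return carries only status, which the callers in arch_ptrace() and ptrace32.c check before using the result. A small userspace-only sketch of the before/after convention (hypothetical register file, nothing kernel-specific):

#include <stdio.h>
#include <errno.h>

static unsigned long regs[4] = { 1, 2, (unsigned long)-EIO, 4 };

/* Old style: the value is the return, so some values look like errors. */
static unsigned long get_reg_old(int regno)
{
	if (regno < 0 || regno >= 4)
		return -EIO;
	return regs[regno];
}

/* New style: status is the return, the value comes back via a pointer. */
static int get_reg_new(int regno, unsigned long *data)
{
	if (regno < 0 || regno >= 4 || !data)
		return -EIO;
	*data = regs[regno];
	return 0;
}

int main(void)
{
	unsigned long val;

	/* regs[2] really holds -EIO; the old API cannot say so unambiguously. */
	printf("old: 0x%lx (value or error?)\n", get_reg_old(2));

	if (!get_reg_new(2, &val))
		printf("new: 0x%lx (definitely a value)\n", val);
	return 0;
}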
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 8c21658..f51599e 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -95,7 +95,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
- tmp = ptrace_get_reg(child, index);
+ ret = ptrace_get_reg(child, index, &tmp);
+ if (ret)
+ break;
} else {
flush_fp_to_thread(child);
/*
@@ -148,7 +150,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
tmp = ((u64 *)child->thread.fpr)
[FPRINDEX_3264(numReg)];
} else { /* register within PT_REGS struct */
- tmp = ptrace_get_reg(child, numReg);
+ unsigned long tmp2;
+ ret = ptrace_get_reg(child, numReg, &tmp2);
+ if (ret)
+ break;
+ tmp = tmp2;
}
reg32bits = ((u32*)&tmp)[part];
ret = put_user(reg32bits, (u32 __user *)data);
@@ -232,7 +238,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
CHECK_FULL_REGS(child->thread.regs);
if (numReg < PT_FPR0) {
- unsigned long freg = ptrace_get_reg(child, numReg);
+ unsigned long freg;
+ ret = ptrace_get_reg(child, numReg, &freg);
+ if (ret)
+ break;
if (index % 2)
freg = (freg & ~0xfffffffful) | (data & 0xfffffffful);
else
@@ -252,6 +261,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
case PTRACE_GET_DEBUGREG: {
+#ifndef CONFIG_PPC_ADV_DEBUG_REGS
+ unsigned long dabr_fake;
+#endif
ret = -EINVAL;
/* We only support one DABR and no IABRS at the moment */
if (addr > 0)
@@ -259,7 +271,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
ret = put_user(child->thread.dac1, (u32 __user *)data);
#else
- ret = put_user(child->thread.dabr, (u32 __user *)data);
+ dabr_fake = (
+ (child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
+ (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
+ ret = put_user(dabr_fake, (u32 __user *)data);
#endif
break;
}
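
Both PTRACE_GET_DEBUGREG fallbacks above rebuild a legacy single-word DABR value from the new struct arch_hw_breakpoint by merging the aligned address with the low flag bits. A tiny sketch of that packing, using a hypothetical 3-bit mask in place of HW_BRK_TYPE_DABR:

#include <stdio.h>

/* Hypothetical mask standing in for HW_BRK_TYPE_DABR: the legacy DABR keeps
 * its control flags in the low bits of the address word. */
#define BRK_LOW_BITS 0x7UL

struct brk_sketch {
	unsigned long address;	/* breakpoint address, low bits clear */
	unsigned long type;	/* flag bits */
};

/* Reconstruct the legacy word as the dabr_fake code above does. */
static unsigned long pack_dabr(const struct brk_sketch *b)
{
	return (b->address & ~BRK_LOW_BITS) | (b->type & BRK_LOW_BITS);
}

int main(void)
{
	struct brk_sketch b = { 0x10001000UL, 0x6UL };	/* two hypothetical flag bits */

	printf("dabr word: 0x%lx\n", pack_dabr(&b));	/* prints 0x10001006 */
	return 0;
}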
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1fd6e7b..52add6f 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/delay.h>
+#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info)
__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
}
+enum rtas_cpu_state {
+ DOWN,
+ UP,
+};
+
+#ifndef CONFIG_SMP
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+ cpumask_var_t cpus)
+{
+ if (!cpumask_empty(cpus)) {
+ cpumask_clear(cpus);
+ return -EINVAL;
+ } else
+ return 0;
+}
+#else
+/* On return, the cpumask will be altered to indicate which CPUs changed.
+ * CPUs whose state changed will be set in the mask,
+ * CPUs whose state is unchanged will be cleared in the mask. */
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+ cpumask_var_t cpus)
+{
+ int cpu;
+ int cpuret = 0;
+ int ret = 0;
+
+ if (cpumask_empty(cpus))
+ return 0;
+
+ for_each_cpu(cpu, cpus) {
+ switch (state) {
+ case DOWN:
+ cpuret = cpu_down(cpu);
+ break;
+ case UP:
+ cpuret = cpu_up(cpu);
+ break;
+ }
+ if (cpuret) {
+ pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+ __func__,
+ ((state == UP) ? "up" : "down"),
+ cpu, cpuret);
+ if (!ret)
+ ret = cpuret;
+ if (state == UP) {
+ /* clear bits for unchanged cpus, return */
+ cpumask_shift_right(cpus, cpus, cpu);
+ cpumask_shift_left(cpus, cpus, cpu);
+ break;
+ } else {
+ /* clear bit for unchanged cpu, continue */
+ cpumask_clear_cpu(cpu, cpus);
+ }
+ }
+ }
+
+ return ret;
+}
+#endif
+
+int rtas_online_cpus_mask(cpumask_var_t cpus)
+{
+ int ret;
+
+ ret = rtas_cpu_state_change_mask(UP, cpus);
+
+ if (ret) {
+ cpumask_var_t tmp_mask;
+
+ if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
+ return ret;
+
+ /* Use tmp_mask to preserve cpus mask from first failure */
+ cpumask_copy(tmp_mask, cpus);
+ rtas_offline_cpus_mask(tmp_mask);
+ free_cpumask_var(tmp_mask);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(rtas_online_cpus_mask);
+
+int rtas_offline_cpus_mask(cpumask_var_t cpus)
+{
+ return rtas_cpu_state_change_mask(DOWN, cpus);
+}
+EXPORT_SYMBOL(rtas_offline_cpus_mask);
+
int rtas_ibm_suspend_me(struct rtas_args *args)
{
long state;
@@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
struct rtas_suspend_me_data data;
DECLARE_COMPLETION_ONSTACK(done);
+ cpumask_var_t offline_mask;
+ int cpuret;
if (!rtas_service_present("ibm,suspend-me"))
return -ENOSYS;
@@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
return 0;
}
+ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+ return -ENOMEM;
+
atomic_set(&data.working, 0);
atomic_set(&data.done, 0);
atomic_set(&data.error, 0);
data.token = rtas_token("ibm,suspend-me");
data.complete = &done;
+
+ /* All present CPUs must be online */
+ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+ cpuret = rtas_online_cpus_mask(offline_mask);
+ if (cpuret) {
+ pr_err("%s: Could not bring present CPUs online.\n", __func__);
+ atomic_set(&data.error, cpuret);
+ goto out;
+ }
+
stop_topology_update();
/* Call function on all CPUs. One of us will make the
@@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
start_topology_update();
+ /* Take back down the CPUs that were not online prior to suspend */
+ cpuret = rtas_offline_cpus_mask(offline_mask);
+ if (cpuret)
+ pr_warn("%s: Could not restore CPUs to offline state.\n",
+ __func__);
+
+out:
+ free_cpumask_var(offline_mask);
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
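
One easy-to-misread detail in rtas_cpu_state_change_mask() above is the cpumask_shift_right()/cpumask_shift_left() pair taken on a cpu_up() failure: shifting the mask right by the failing CPU number and then left by the same amount clears every bit below that CPU while leaving the failing CPU and the not-yet-attempted ones set. A stand-alone illustration of the same trick on a plain word-sized mask (hypothetical helper, not the kernel cpumask API):

#include <stdio.h>

/* Clear all bits below position 'cpu', keeping 'cpu' and above. */
static unsigned long clear_bits_below(unsigned long mask, unsigned int cpu)
{
	/* Equivalent to cpumask_shift_right() followed by cpumask_shift_left(). */
	return (mask >> cpu) << cpu;
}

int main(void)
{
	unsigned long mask = 0xffUL;	/* CPUs 0-7 requested */
	unsigned int failing = 5;	/* pretend cpu_up(5) failed */

	mask = clear_bits_below(mask, failing);
	printf("remaining mask: 0x%lx\n", mask);	/* prints 0xe0 */
	return 0;
}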
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 8329190..2f3cdb0 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -57,13 +57,31 @@
#define VALIDATE_READY -1001 /* Firmware image ready for validation */
#define VALIDATE_PARAM_ERR -3 /* RTAS Parameter Error */
#define VALIDATE_HW_ERR -1 /* RTAS Hardware Error */
-#define VALIDATE_TMP_UPDATE 0 /* Validate Return Status */
-#define VALIDATE_FLASH_AUTH 1 /* Validate Return Status */
-#define VALIDATE_INVALID_IMG 2 /* Validate Return Status */
-#define VALIDATE_CUR_UNKNOWN 3 /* Validate Return Status */
-#define VALIDATE_TMP_COMMIT_DL 4 /* Validate Return Status */
-#define VALIDATE_TMP_COMMIT 5 /* Validate Return Status */
-#define VALIDATE_TMP_UPDATE_DL 6 /* Validate Return Status */
+
+/* ibm,validate-flash-image update result tokens */
+#define VALIDATE_TMP_UPDATE 0 /* T side will be updated */
+#define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */
+#define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */
+#define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */
+/*
+ * Current T side will be committed to P side before being replaced with new
+ * image, and the new image is downlevel from current image
+ */
+#define VALIDATE_TMP_COMMIT_DL 4
+/*
+ * Current T side will be committed to P side before being replaced with new
+ * image
+ */
+#define VALIDATE_TMP_COMMIT 5
+/*
+ * T side will be updated with a downlevel image
+ */
+#define VALIDATE_TMP_UPDATE_DL 6
+/*
+ * The candidate image's release date is later than the system's firmware
+ * service entitlement date - service warranty period has expired
+ */
+#define VALIDATE_OUT_OF_WRNTY 7
/* ibm,manage-flash-image operation tokens */
#define RTAS_REJECT_TMP_IMG 0
@@ -71,6 +89,7 @@
/* Array sizes */
#define VALIDATE_BUF_SIZE 4096
+#define VALIDATE_MSG_LEN 256
#define RTAS_MSG_MAXLEN 64
/* Quirk - RTAS requires 4k list length and block size */
@@ -102,9 +121,10 @@ static struct kmem_cache *flash_block_cache = NULL;
#define FLASH_BLOCK_LIST_VERSION (1UL)
-/* Local copy of the flash block list.
- * We only allow one open of the flash proc file and create this
- * list as we go. The rtas_firmware_flash_list varable will be
+/*
+ * Local copy of the flash block list.
+ *
+ * The rtas_firmware_flash_list variable will be
* set once the data is fully read.
*
* For convenience as we build the list we use virtual addrs,
@@ -125,23 +145,23 @@ struct rtas_update_flash_t
struct rtas_manage_flash_t
{
int status; /* Returned status */
- unsigned int op; /* Reject or commit image */
};
/* Status int must be first member of struct */
struct rtas_validate_flash_t
{
int status; /* Returned status */
- char buf[VALIDATE_BUF_SIZE]; /* Candidate image buffer */
+ char *buf; /* Candidate image buffer */
unsigned int buf_size; /* Size of image buf */
unsigned int update_results; /* Update results token */
};
-static DEFINE_SPINLOCK(flash_file_open_lock);
-static struct proc_dir_entry *firmware_flash_pde;
-static struct proc_dir_entry *firmware_update_pde;
-static struct proc_dir_entry *validate_pde;
-static struct proc_dir_entry *manage_pde;
+static struct rtas_update_flash_t rtas_update_flash_data;
+static struct rtas_manage_flash_t rtas_manage_flash_data;
+static struct rtas_validate_flash_t rtas_validate_flash_data;
+static DEFINE_MUTEX(rtas_update_flash_mutex);
+static DEFINE_MUTEX(rtas_manage_flash_mutex);
+static DEFINE_MUTEX(rtas_validate_flash_mutex);
/* Do simple sanity checks on the flash image. */
static int flash_list_valid(struct flash_block_list *flist)
@@ -191,10 +211,10 @@ static void free_flash_list(struct flash_block_list *f)
static int rtas_flash_release(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
- struct rtas_update_flash_t *uf;
-
- uf = (struct rtas_update_flash_t *) dp->data;
+ struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
+
+ mutex_lock(&rtas_update_flash_mutex);
+
if (uf->flist) {
/* File was opened in write mode for a new flash attempt */
/* Clear saved list */
@@ -214,13 +234,14 @@ static int rtas_flash_release(struct inode *inode, struct file *file)
uf->flist = NULL;
}
- atomic_dec(&dp->count);
+ mutex_unlock(&rtas_update_flash_mutex);
return 0;
}
-static void get_flash_status_msg(int status, char *buf)
+static size_t get_flash_status_msg(int status, char *buf)
{
- char *msg;
+ const char *msg;
+ size_t len;
switch (status) {
case FLASH_AUTH:
@@ -242,36 +263,47 @@ static void get_flash_status_msg(int status, char *buf)
msg = "ready: firmware image ready for flash on reboot\n";
break;
default:
- sprintf(buf, "error: unexpected status value %d\n", status);
- return;
+ return sprintf(buf, "error: unexpected status value %d\n",
+ status);
}
- strcpy(buf, msg);
+ len = strlen(msg);
+ memcpy(buf, msg, len + 1);
+ return len;
}
/* Reading the proc file will show status (not the firmware contents) */
-static ssize_t rtas_flash_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t rtas_flash_read_msg(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
- struct rtas_update_flash_t *uf;
+ struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
char msg[RTAS_MSG_MAXLEN];
+ size_t len;
+ int status;
- uf = dp->data;
-
- if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) {
- get_flash_status_msg(uf->status, msg);
- } else { /* FIRMWARE_UPDATE_NAME */
- sprintf(msg, "%d\n", uf->status);
- }
+ mutex_lock(&rtas_update_flash_mutex);
+ status = uf->status;
+ mutex_unlock(&rtas_update_flash_mutex);
- return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg));
+ /* Read as text message */
+ len = get_flash_status_msg(status, msg);
+ return simple_read_from_buffer(buf, count, ppos, msg, len);
}
-/* constructor for flash_block_cache */
-void rtas_block_ctor(void *ptr)
+static ssize_t rtas_flash_read_num(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- memset(ptr, 0, RTAS_BLK_SIZE);
+ struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
+ char msg[RTAS_MSG_MAXLEN];
+ int status;
+
+ mutex_lock(&rtas_update_flash_mutex);
+ status = uf->status;
+ mutex_unlock(&rtas_update_flash_mutex);
+
+ /* Read as number */
+ sprintf(msg, "%d\n", status);
+ return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg));
}
/* We could be much more efficient here. But to keep this function
@@ -282,25 +314,24 @@ void rtas_block_ctor(void *ptr)
static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
- struct rtas_update_flash_t *uf;
+ struct rtas_update_flash_t *const uf = &rtas_update_flash_data;
char *p;
- int next_free;
+ int next_free, rc;
struct flash_block_list *fl;
- uf = (struct rtas_update_flash_t *) dp->data;
+ mutex_lock(&rtas_update_flash_mutex);
if (uf->status == FLASH_AUTH || count == 0)
- return count; /* discard data */
+ goto out; /* discard data */
/* In the case that the image is not ready for flashing, the memory
* allocated for the block list will be freed upon the release of the
* proc file
*/
if (uf->flist == NULL) {
- uf->flist = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
+ uf->flist = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!uf->flist)
- return -ENOMEM;
+ goto nomem;
}
fl = uf->flist;
@@ -309,63 +340,48 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
next_free = fl->num_blocks;
if (next_free == FLASH_BLOCKS_PER_NODE) {
/* Need to allocate another block_list */
- fl->next = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
+ fl->next = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!fl->next)
- return -ENOMEM;
+ goto nomem;
fl = fl->next;
next_free = 0;
}
if (count > RTAS_BLK_SIZE)
count = RTAS_BLK_SIZE;
- p = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
+ p = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL);
if (!p)
- return -ENOMEM;
+ goto nomem;
if(copy_from_user(p, buffer, count)) {
kmem_cache_free(flash_block_cache, p);
- return -EFAULT;
+ rc = -EFAULT;
+ goto error;
}
fl->blocks[next_free].data = p;
fl->blocks[next_free].length = count;
fl->num_blocks++;
-
+out:
+ mutex_unlock(&rtas_update_flash_mutex);
return count;
-}
-static int rtas_excl_open(struct inode *inode, struct file *file)
-{
- struct proc_dir_entry *dp = PDE(inode);
-
- /* Enforce exclusive open with use count of PDE */
- spin_lock(&flash_file_open_lock);
- if (atomic_read(&dp->count) > 2) {
- spin_unlock(&flash_file_open_lock);
- return -EBUSY;
- }
-
- atomic_inc(&dp->count);
- spin_unlock(&flash_file_open_lock);
-
- return 0;
-}
-
-static int rtas_excl_release(struct inode *inode, struct file *file)
-{
- struct proc_dir_entry *dp = PDE(inode);
-
- atomic_dec(&dp->count);
-
- return 0;
+nomem:
+ rc = -ENOMEM;
+error:
+ mutex_unlock(&rtas_update_flash_mutex);
+ return rc;
}
-static void manage_flash(struct rtas_manage_flash_t *args_buf)
+/*
+ * Flash management routines.
+ */
+static void manage_flash(struct rtas_manage_flash_t *args_buf, unsigned int op)
{
s32 rc;
do {
- rc = rtas_call(rtas_token("ibm,manage-flash-image"), 1,
- 1, NULL, args_buf->op);
+ rc = rtas_call(rtas_token("ibm,manage-flash-image"), 1, 1,
+ NULL, op);
} while (rtas_busy_delay(rc));
args_buf->status = rc;
@@ -374,55 +390,62 @@ static void manage_flash(struct rtas_manage_flash_t *args_buf)
static ssize_t manage_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
- struct rtas_manage_flash_t *args_buf;
+ struct rtas_manage_flash_t *const args_buf = &rtas_manage_flash_data;
char msg[RTAS_MSG_MAXLEN];
- int msglen;
-
- args_buf = dp->data;
- if (args_buf == NULL)
- return 0;
+ int msglen, status;
- msglen = sprintf(msg, "%d\n", args_buf->status);
+ mutex_lock(&rtas_manage_flash_mutex);
+ status = args_buf->status;
+ mutex_unlock(&rtas_manage_flash_mutex);
+ msglen = sprintf(msg, "%d\n", status);
return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
static ssize_t manage_flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
- struct rtas_manage_flash_t *args_buf;
- const char reject_str[] = "0";
- const char commit_str[] = "1";
+ struct rtas_manage_flash_t *const args_buf = &rtas_manage_flash_data;
+ static const char reject_str[] = "0";
+ static const char commit_str[] = "1";
char stkbuf[10];
- int op;
+ int op, rc;
+
+ mutex_lock(&rtas_manage_flash_mutex);
- args_buf = (struct rtas_manage_flash_t *) dp->data;
if ((args_buf->status == MANAGE_AUTH) || (count == 0))
- return count;
+ goto out;
op = -1;
if (buf) {
if (count > 9) count = 9;
- if (copy_from_user (stkbuf, buf, count)) {
- return -EFAULT;
- }
+ rc = -EFAULT;
+ if (copy_from_user (stkbuf, buf, count))
+ goto error;
if (strncmp(stkbuf, reject_str, strlen(reject_str)) == 0)
op = RTAS_REJECT_TMP_IMG;
else if (strncmp(stkbuf, commit_str, strlen(commit_str)) == 0)
op = RTAS_COMMIT_TMP_IMG;
}
- if (op == -1) /* buf is empty, or contains invalid string */
- return -EINVAL;
-
- args_buf->op = op;
- manage_flash(args_buf);
+ if (op == -1) { /* buf is empty, or contains invalid string */
+ rc = -EINVAL;
+ goto error;
+ }
+ manage_flash(args_buf, op);
+out:
+ mutex_unlock(&rtas_manage_flash_mutex);
return count;
+
+error:
+ mutex_unlock(&rtas_manage_flash_mutex);
+ return rc;
}
+/*
+ * Validation routines.
+ */
static void validate_flash(struct rtas_validate_flash_t *args_buf)
{
int token = rtas_token("ibm,validate-flash-image");
@@ -444,7 +467,7 @@ static void validate_flash(struct rtas_validate_flash_t *args_buf)
}
static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
- char *msg)
+ char *msg, int msglen)
{
int n;
@@ -452,7 +475,8 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
n = sprintf(msg, "%d\n", args_buf->update_results);
if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) ||
(args_buf->update_results == VALIDATE_TMP_UPDATE))
- n += sprintf(msg + n, "%s\n", args_buf->buf);
+ n += snprintf(msg + n, msglen - n, "%s\n",
+ args_buf->buf);
} else {
n = sprintf(msg, "%d\n", args_buf->status);
}
@@ -462,14 +486,14 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
static ssize_t validate_flash_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
- struct rtas_validate_flash_t *args_buf;
- char msg[RTAS_MSG_MAXLEN];
+ struct rtas_validate_flash_t *const args_buf =
+ &rtas_validate_flash_data;
+ char msg[VALIDATE_MSG_LEN];
int msglen;
- args_buf = dp->data;
-
- msglen = get_validate_flash_msg(args_buf, msg);
+ mutex_lock(&rtas_validate_flash_mutex);
+ msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN);
+ mutex_unlock(&rtas_validate_flash_mutex);
return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
@@ -477,24 +501,18 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
static ssize_t validate_flash_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
- struct rtas_validate_flash_t *args_buf;
+ struct rtas_validate_flash_t *const args_buf =
+ &rtas_validate_flash_data;
int rc;
- args_buf = (struct rtas_validate_flash_t *) dp->data;
-
- if (dp->data == NULL) {
- dp->data = kmalloc(sizeof(struct rtas_validate_flash_t),
- GFP_KERNEL);
- if (dp->data == NULL)
- return -ENOMEM;
- }
+ mutex_lock(&rtas_validate_flash_mutex);
/* We are only interested in the first 4K of the
* candidate image */
if ((*off >= VALIDATE_BUF_SIZE) ||
(args_buf->status == VALIDATE_AUTH)) {
*off += count;
+ mutex_unlock(&rtas_validate_flash_mutex);
return count;
}
@@ -517,31 +535,29 @@ static ssize_t validate_flash_write(struct file *file, const char __user *buf,
*off += count;
rc = count;
done:
- if (rc < 0) {
- kfree(dp->data);
- dp->data = NULL;
- }
+ mutex_unlock(&rtas_validate_flash_mutex);
return rc;
}
static int validate_flash_release(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
- struct rtas_validate_flash_t *args_buf;
+ struct rtas_validate_flash_t *const args_buf =
+ &rtas_validate_flash_data;
- args_buf = (struct rtas_validate_flash_t *) dp->data;
+ mutex_lock(&rtas_validate_flash_mutex);
if (args_buf->status == VALIDATE_READY) {
args_buf->buf_size = VALIDATE_BUF_SIZE;
validate_flash(args_buf);
}
- /* The matching atomic_inc was in rtas_excl_open() */
- atomic_dec(&dp->count);
-
+ mutex_unlock(&rtas_validate_flash_mutex);
return 0;
}
+/*
+ * On-reboot flash update applicator.
+ */
static void rtas_flash_firmware(int reboot_type)
{
unsigned long image_size;
@@ -634,75 +650,57 @@ static void rtas_flash_firmware(int reboot_type)
spin_unlock(&rtas_data_buf_lock);
}
-static void remove_flash_pde(struct proc_dir_entry *dp)
-{
- if (dp) {
- kfree(dp->data);
- remove_proc_entry(dp->name, dp->parent);
- }
-}
-
-static int initialize_flash_pde_data(const char *rtas_call_name,
- size_t buf_size,
- struct proc_dir_entry *dp)
-{
+/*
+ * Manifest of proc files to create
+ */
+struct rtas_flash_file {
+ const char *filename;
+ const char *rtas_call_name;
int *status;
- int token;
-
- dp->data = kzalloc(buf_size, GFP_KERNEL);
- if (dp->data == NULL)
- return -ENOMEM;
-
- /*
- * This code assumes that the status int is the first member of the
- * struct
- */
- status = (int *) dp->data;
- token = rtas_token(rtas_call_name);
- if (token == RTAS_UNKNOWN_SERVICE)
- *status = FLASH_AUTH;
- else
- *status = FLASH_NO_OP;
-
- return 0;
-}
-
-static struct proc_dir_entry *create_flash_pde(const char *filename,
- const struct file_operations *fops)
-{
- return proc_create(filename, S_IRUSR | S_IWUSR, NULL, fops);
-}
-
-static const struct file_operations rtas_flash_operations = {
- .owner = THIS_MODULE,
- .read = rtas_flash_read,
- .write = rtas_flash_write,
- .open = rtas_excl_open,
- .release = rtas_flash_release,
- .llseek = default_llseek,
-};
-
-static const struct file_operations manage_flash_operations = {
- .owner = THIS_MODULE,
- .read = manage_flash_read,
- .write = manage_flash_write,
- .open = rtas_excl_open,
- .release = rtas_excl_release,
- .llseek = default_llseek,
+ const struct file_operations fops;
};
-static const struct file_operations validate_flash_operations = {
- .owner = THIS_MODULE,
- .read = validate_flash_read,
- .write = validate_flash_write,
- .open = rtas_excl_open,
- .release = validate_flash_release,
- .llseek = default_llseek,
+static const struct rtas_flash_file rtas_flash_files[] = {
+ {
+ .filename = "powerpc/rtas/" FIRMWARE_FLASH_NAME,
+ .rtas_call_name = "ibm,update-flash-64-and-reboot",
+ .status = &rtas_update_flash_data.status,
+ .fops.read = rtas_flash_read_msg,
+ .fops.write = rtas_flash_write,
+ .fops.release = rtas_flash_release,
+ .fops.llseek = default_llseek,
+ },
+ {
+ .filename = "powerpc/rtas/" FIRMWARE_UPDATE_NAME,
+ .rtas_call_name = "ibm,update-flash-64-and-reboot",
+ .status = &rtas_update_flash_data.status,
+ .fops.read = rtas_flash_read_num,
+ .fops.write = rtas_flash_write,
+ .fops.release = rtas_flash_release,
+ .fops.llseek = default_llseek,
+ },
+ {
+ .filename = "powerpc/rtas/" VALIDATE_FLASH_NAME,
+ .rtas_call_name = "ibm,validate-flash-image",
+ .status = &rtas_validate_flash_data.status,
+ .fops.read = validate_flash_read,
+ .fops.write = validate_flash_write,
+ .fops.release = validate_flash_release,
+ .fops.llseek = default_llseek,
+ },
+ {
+ .filename = "powerpc/rtas/" MANAGE_FLASH_NAME,
+ .rtas_call_name = "ibm,manage-flash-image",
+ .status = &rtas_manage_flash_data.status,
+ .fops.read = manage_flash_read,
+ .fops.write = manage_flash_write,
+ .fops.llseek = default_llseek,
+ }
};
static int __init rtas_flash_init(void)
{
- int rc;
+ int i;
if (rtas_token("ibm,update-flash-64-and-reboot") ==
RTAS_UNKNOWN_SERVICE) {
@@ -710,93 +708,70 @@ static int __init rtas_flash_init(void)
return 1;
}
- firmware_flash_pde = create_flash_pde("powerpc/rtas/"
- FIRMWARE_FLASH_NAME,
- &rtas_flash_operations);
- if (firmware_flash_pde == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
+ rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
+ if (!rtas_validate_flash_data.buf)
+ return -ENOMEM;
- rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot",
- sizeof(struct rtas_update_flash_t),
- firmware_flash_pde);
- if (rc != 0)
- goto cleanup;
-
- firmware_update_pde = create_flash_pde("powerpc/rtas/"
- FIRMWARE_UPDATE_NAME,
- &rtas_flash_operations);
- if (firmware_update_pde == NULL) {
- rc = -ENOMEM;
- goto cleanup;
+ flash_block_cache = kmem_cache_create("rtas_flash_cache",
+ RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
+ NULL);
+ if (!flash_block_cache) {
+ printk(KERN_ERR "%s: failed to create block cache\n",
+ __func__);
+ goto enomem_buf;
}
- rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot",
- sizeof(struct rtas_update_flash_t),
- firmware_update_pde);
- if (rc != 0)
- goto cleanup;
-
- validate_pde = create_flash_pde("powerpc/rtas/" VALIDATE_FLASH_NAME,
- &validate_flash_operations);
- if (validate_pde == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
+ for (i = 0; i < ARRAY_SIZE(rtas_flash_files); i++) {
+ const struct rtas_flash_file *f = &rtas_flash_files[i];
+ int token;
- rc = initialize_flash_pde_data("ibm,validate-flash-image",
- sizeof(struct rtas_validate_flash_t),
- validate_pde);
- if (rc != 0)
- goto cleanup;
-
- manage_pde = create_flash_pde("powerpc/rtas/" MANAGE_FLASH_NAME,
- &manage_flash_operations);
- if (manage_pde == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
+ if (!proc_create(f->filename, S_IRUSR | S_IWUSR, NULL, &f->fops))
+ goto enomem;
- rc = initialize_flash_pde_data("ibm,manage-flash-image",
- sizeof(struct rtas_manage_flash_t),
- manage_pde);
- if (rc != 0)
- goto cleanup;
+ /*
+ * This code assumes that the status int is the first member of the
+ * struct
+ */
+ token = rtas_token(f->rtas_call_name);
+ if (token == RTAS_UNKNOWN_SERVICE)
+ *f->status = FLASH_AUTH;
+ else
+ *f->status = FLASH_NO_OP;
+ }
rtas_flash_term_hook = rtas_flash_firmware;
-
- flash_block_cache = kmem_cache_create("rtas_flash_cache",
- RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
- rtas_block_ctor);
- if (!flash_block_cache) {
- printk(KERN_ERR "%s: failed to create block cache\n",
- __func__);
- rc = -ENOMEM;
- goto cleanup;
- }
return 0;
-cleanup:
- remove_flash_pde(firmware_flash_pde);
- remove_flash_pde(firmware_update_pde);
- remove_flash_pde(validate_pde);
- remove_flash_pde(manage_pde);
+enomem:
+ while (--i >= 0) {
+ const struct rtas_flash_file *f = &rtas_flash_files[i];
+ remove_proc_entry(f->filename, NULL);
+ }
- return rc;
+ kmem_cache_destroy(flash_block_cache);
+enomem_buf:
+ kfree(rtas_validate_flash_data.buf);
+ return -ENOMEM;
}
static void __exit rtas_flash_cleanup(void)
{
+ int i;
+
rtas_flash_term_hook = NULL;
- if (flash_block_cache)
- kmem_cache_destroy(flash_block_cache);
+ if (rtas_firmware_flash_list) {
+ free_flash_list(rtas_firmware_flash_list);
+ rtas_firmware_flash_list = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rtas_flash_files); i++) {
+ const struct rtas_flash_file *f = &rtas_flash_files[i];
+ remove_proc_entry(f->filename, NULL);
+ }
- remove_flash_pde(firmware_flash_pde);
- remove_flash_pde(firmware_update_pde);
- remove_flash_pde(validate_pde);
- remove_flash_pde(manage_pde);
+ kmem_cache_destroy(flash_block_cache);
+ kfree(rtas_validate_flash_data.buf);
}
module_init(rtas_flash_init);
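
The proc interface above now comes from a single static manifest (rtas_flash_files) that rtas_flash_init() walks with proc_create(), unwinding in reverse with remove_proc_entry() if any entry fails; the same table drives rtas_flash_cleanup(). A minimal, self-contained sketch of that manifest-plus-unwind pattern, with hypothetical create/destroy helpers in place of the proc API:

#include <stdio.h>
#include <stddef.h>

struct resource_desc {
	const char *name;
	int fail;			/* simulate a creation failure */
};

static const struct resource_desc resources[] = {
	{ "flash",    0 },
	{ "update",   0 },
	{ "validate", 1 },		/* pretend this one cannot be created */
	{ "manage",   0 },
};

static int create_resource(const struct resource_desc *r)
{
	if (r->fail)
		return -1;
	printf("created %s\n", r->name);
	return 0;
}

static void destroy_resource(const struct resource_desc *r)
{
	printf("removed %s\n", r->name);
}

static int init_all(void)
{
	size_t i, n = sizeof(resources) / sizeof(resources[0]);

	for (i = 0; i < n; i++)
		if (create_resource(&resources[i]))
			goto unwind;
	return 0;

unwind:
	/* Tear down only what was created, in reverse order. */
	while (i-- > 0)
		destroy_resource(&resources[i]);
	return -1;
}

int main(void)
{
	return init_all() ? 1 : 0;
}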
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 71cb20d..6e7b7cd 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -201,7 +201,7 @@ static void python_countermeasures(struct device_node *dev)
iounmap(chip_regs);
}
-void __init init_pci_config_tokens (void)
+void __init init_pci_config_tokens(void)
{
read_pci_config = rtas_token("read-pci-config");
write_pci_config = rtas_token("write-pci-config");
@@ -209,7 +209,7 @@ void __init init_pci_config_tokens (void)
ibm_write_pci_config = rtas_token("ibm,write-pci-config");
}
-unsigned long get_phb_buid (struct device_node *phb)
+unsigned long get_phb_buid(struct device_node *phb)
{
struct resource r;
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 1045ff4..1130c53 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -29,6 +29,7 @@
#include <asm/nvram.h>
#include <linux/atomic.h>
#include <asm/machdep.h>
+#include <asm/topology.h>
static DEFINE_SPINLOCK(rtasd_log_lock);
@@ -87,6 +88,8 @@ static char *rtas_event_type(int type)
return "Resource Deallocation Event";
case RTAS_TYPE_DUMP:
return "Dump Notification Event";
+ case RTAS_TYPE_PRRN:
+ return "Platform Resource Reassignment Event";
}
return rtas_type[0];
@@ -265,9 +268,51 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
+}
+
+#ifdef CONFIG_PPC_PSERIES
+static s32 prrn_update_scope;
+static void prrn_work_fn(struct work_struct *work)
+{
+ /*
+ * For PRRN, we must pass the negative of the scope value in
+ * the RTAS event.
+ */
+ pseries_devicetree_update(-prrn_update_scope);
}
+static DECLARE_WORK(prrn_work, prrn_work_fn);
+
+void prrn_schedule_update(u32 scope)
+{
+ flush_work(&prrn_work);
+ prrn_update_scope = scope;
+ schedule_work(&prrn_work);
+}
+
+static void handle_rtas_event(const struct rtas_error_log *log)
+{
+ if (log->type == RTAS_TYPE_PRRN) {
+ /* For PRRN events, the extended log length is used to denote
+ * the scope for calling rtas update-nodes.
+ */
+ if (prrn_is_enabled())
+ prrn_schedule_update(log->extended_log_length);
+ }
+
+ return;
+}
+
+#else
+
+static void handle_rtas_event(const struct rtas_error_log *log)
+{
+ return;
+}
+
+#endif
+
static int rtas_log_open(struct inode * inode, struct file * file)
{
return 0;
@@ -388,8 +433,10 @@ static void do_event_scan(void)
break;
}
- if (error == 0)
+ if (error == 0) {
pSeries_log_error(logdata, ERR_TYPE_RTAS_LOG, 0);
+ handle_rtas_event((struct rtas_error_log *)logdata);
+ }
} while(error == 0);
}
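
The PRRN plumbing above passes a single shared scope value (prrn_update_scope) to a single outstanding work item, so prrn_schedule_update() calls flush_work() before overwriting the scope and rescheduling, ensuring a still-running update cannot observe the new value. A rough userspace analogy of that flush-then-set-then-schedule discipline, with a plain thread standing in for the workqueue (hypothetical names throughout):

#include <stdio.h>
#include <pthread.h>
#include <unistd.h>

/* One shared argument for one outstanding work item. */
static int update_scope;
static pthread_t worker;
static int worker_live;

static void *work_fn(void *arg)
{
	(void)arg;
	/* The kernel work function calls pseries_devicetree_update(-scope). */
	printf("running update for scope %d\n", update_scope);
	sleep(1);
	return NULL;
}

/* Analogue of flush_work(): wait for the in-flight item, if any. */
static void flush_update(void)
{
	if (worker_live) {
		pthread_join(worker, NULL);
		worker_live = 0;
	}
}

static void schedule_update(int scope)
{
	/* Never overwrite update_scope while the previous item may read it. */
	flush_update();
	update_scope = scope;
	if (pthread_create(&worker, NULL, work_fn, NULL) == 0)
		worker_live = 1;
}

int main(void)
{
	schedule_update(3);
	schedule_update(5);	/* waits for the first update to finish */
	flush_update();
	return 0;
}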
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 1f6cbae0c..03e0f72 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -624,12 +624,6 @@ int check_legacy_ioport(unsigned long base_port)
case FDC_BASE: /* FDC1 */
np = of_find_node_by_type(NULL, "fdc");
break;
-#ifdef CONFIG_PPC_PREP
- case _PIDXR:
- case _PNPWRP:
- case PNPBIOS_BASE:
- /* implement me */
-#endif
default:
/* ipmi is supposed to fail here */
break;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 247815a..c7852c1 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -186,6 +186,15 @@ early_param("smt-enabled", early_smt_enabled);
#define check_smt_enabled()
#endif /* CONFIG_SMP */
+/** Fix up paca fields required for the boot cpu */
+static void fixup_boot_paca(void)
+{
+ /* The boot cpu is started */
+ get_paca()->cpu_start = 1;
+ /* Allow percpu accesses to work until we setup percpu data */
+ get_paca()->data_offset = 0;
+}
+
/*
* Early initialization entry point. This is called by head.S
* with MMU translation disabled. We rely on the "feature" of
@@ -207,6 +216,8 @@ early_param("smt-enabled", early_smt_enabled);
void __init early_setup(unsigned long dt_ptr)
{
+ static __initdata struct paca_struct boot_paca;
+
/* -------- printk is _NOT_ safe to use here ! ------- */
/* Identify CPU type */
@@ -215,6 +226,7 @@ void __init early_setup(unsigned long dt_ptr)
/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
initialise_paca(&boot_paca, 0);
setup_paca(&boot_paca);
+ fixup_boot_paca();
/* Initialize lockdep early or else spinlocks will blow */
lockdep_init();
@@ -235,11 +247,7 @@ void __init early_setup(unsigned long dt_ptr)
/* Now we know the logical id of our boot cpu, setup the paca. */
setup_paca(&paca[boot_cpuid]);
-
- /* Fix up paca fields required for the boot cpu */
- get_paca()->cpu_start = 1;
- /* Allow percpu accesses to "work" until we setup percpu data */
- get_paca()->data_offset = 0;
+ fixup_boot_paca();
/* Probe the machine type */
probe_machine();
@@ -609,7 +617,9 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = klimit;
-
+#ifdef CONFIG_PPC_64K_PAGES
+ init_mm.context.pte_frag = NULL;
+#endif
irqstack_early_init();
exc_lvl_early_init();
emergency_stack_init();
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 3b99711..457e97a 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -13,10 +13,12 @@
#include <linux/signal.h>
#include <linux/uprobes.h>
#include <linux/key.h>
+#include <linux/context_tracking.h>
#include <asm/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/debug.h>
+#include <asm/tm.h>
#include "signal.h"
@@ -24,18 +26,18 @@
* through debug.exception-trace sysctl.
*/
-int show_unhandled_signals = 0;
+int show_unhandled_signals = 1;
/*
* Allocate space for the signal frame
*/
-void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
size_t frame_size, int is_32)
{
unsigned long oldsp, newsp;
/* Default to using normal stack */
- oldsp = get_clean_sp(regs, is_32);
+ oldsp = get_clean_sp(sp, is_32);
/* Check for alt stack */
if ((ka->sa.sa_flags & SA_ONSTACK) &&
@@ -130,8 +132,9 @@ static int do_signal(struct pt_regs *regs)
* user space. The DABR will have been cleared if it
* triggered inside the kernel.
*/
- if (current->thread.dabr)
- set_dabr(current->thread.dabr, current->thread.dabrx);
+ if (current->thread.hw_brk.address &&
+ current->thread.hw_brk.type)
+ set_breakpoint(&current->thread.hw_brk);
#endif
/* Re-enable the breakpoints for the signal stack */
thread_change_pc(current, regs);
@@ -158,6 +161,8 @@ static int do_signal(struct pt_regs *regs)
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
+ user_exit();
+
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
@@ -168,11 +173,41 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
+
+ user_enter();
}
-long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
- unsigned long r5, unsigned long r6, unsigned long r7,
- unsigned long r8, struct pt_regs *regs)
+unsigned long get_tm_stackpointer(struct pt_regs *regs)
{
- return do_sigaltstack(uss, uoss, regs->gpr[1]);
+ /* When in an active transaction that takes a signal, we need to be
+ * careful with the stack. It's possible that the stack has moved back
+ * up after the tbegin. The obvious case here is when the tbegin is
+ * called inside a function that returns before a tend. In this case,
+ * the stack is part of the checkpointed transactional memory state.
+ * If we write over this non-transactionally or in suspend, we are in
+ * trouble because if we get a tm abort, the program counter and stack
+ * pointer will be back at the tbegin but our in-memory stack won't be
+ * valid anymore.
+ *
+ * To avoid this, when taking a signal in an active transaction, we
+ * need to use the stack pointer from the checkpointed state, rather
+ * than the speculated state. This ensures that the signal context
+ * (written tm suspended) will be written below the stack required for
+ * the rollback. The transaction is aborted because of the treclaim,
+ * so any memory written between the tbegin and the signal will be
+ * rolled back anyway.
+ *
+ * For signals taken in non-TM or suspended mode, we use the
+ * normal/non-checkpointed stack pointer.
+ */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(regs->msr)) {
+ tm_enable();
+ tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
+ if (MSR_TM_TRANSACTIONAL(regs->msr))
+ return current->thread.ckpt_regs.gpr[1];
+ }
+#endif
+ return regs->gpr[1];
}
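
The comment in get_tm_stackpointer() above reduces to one decision: if the signal interrupts an active transaction, the signal frame must be built below the checkpointed stack pointer rather than the speculative one, because the speculative stack may already have unwound past memory the transaction will roll back to. A hedged userspace-only sketch of just that decision (hypothetical struct and field names; the real function also performs tm_reclaim() first):

#include <stdio.h>

/* Hypothetical stand-ins for the MSR transaction test and the two candidate
 * stack pointers the kernel reads from pt_regs and the thread struct. */
struct regs_sketch {
	int tm_active;			/* MSR_TM_ACTIVE(msr) */
	unsigned long speculative_sp;	/* regs->gpr[1] */
	unsigned long checkpointed_sp;	/* thread.ckpt_regs.gpr[1] */
};

static unsigned long signal_stack_pointer(const struct regs_sketch *r)
{
	/* Inside an active transaction, the speculative SP may have moved back
	 * up (the function containing tbegin returned), so the frame must go
	 * below the checkpointed SP to stay clear of rollback memory. */
	if (r->tm_active)
		return r->checkpointed_sp;
	return r->speculative_sp;
}

int main(void)
{
	struct regs_sketch r = { 1, 0x7fff2000UL, 0x7fff0000UL };

	printf("frame goes below 0x%lx\n", signal_stack_pointer(&r));
	return 0;
}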
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index e00acb4..c69b9ae 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -12,7 +12,7 @@
extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
-extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
size_t frame_size, int is_32);
extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
@@ -25,13 +25,21 @@ extern int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
extern unsigned long copy_fpr_to_user(void __user *to,
struct task_struct *task);
+extern unsigned long copy_transact_fpr_to_user(void __user *to,
+ struct task_struct *task);
extern unsigned long copy_fpr_from_user(struct task_struct *task,
void __user *from);
+extern unsigned long copy_transact_fpr_from_user(struct task_struct *task,
+ void __user *from);
#ifdef CONFIG_VSX
extern unsigned long copy_vsx_to_user(void __user *to,
struct task_struct *task);
+extern unsigned long copy_transact_vsx_to_user(void __user *to,
+ struct task_struct *task);
extern unsigned long copy_vsx_from_user(struct task_struct *task,
void __user *from);
+extern unsigned long copy_transact_vsx_from_user(struct task_struct *task,
+ void __user *from);
#endif
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 804e323..201385c 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -43,6 +43,7 @@
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
+#include <asm/tm.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
@@ -56,10 +57,7 @@
#undef DEBUG_SIG
#ifdef CONFIG_PPC64
-#define sys_sigsuspend compat_sys_sigsuspend
-#define sys_rt_sigsuspend compat_sys_rt_sigsuspend
#define sys_rt_sigreturn compat_sys_rt_sigreturn
-#define sys_sigaction compat_sys_sigaction
#define sys_swapcontext compat_sys_swapcontext
#define sys_sigreturn compat_sys_sigreturn
@@ -68,6 +66,8 @@
#define mcontext mcontext32
#define ucontext ucontext32
+#define __save_altstack __compat_save_altstack
+
/*
* Userspace code may pass a ucontext which doesn't include VSX added
* at the end. We need to check for this case.
@@ -130,23 +130,6 @@ static inline int get_sigset_t(sigset_t *set,
return 0;
}
-static inline int get_old_sigaction(struct k_sigaction *new_ka,
- struct old_sigaction __user *act)
-{
- compat_old_sigset_t mask;
- compat_uptr_t handler, restorer;
-
- if (get_user(handler, &act->sa_handler) ||
- __get_user(restorer, &act->sa_restorer) ||
- __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
- __get_user(mask, &act->sa_mask))
- return -EFAULT;
- new_ka->sa.sa_handler = compat_ptr(handler);
- new_ka->sa.sa_restorer = compat_ptr(restorer);
- siginitset(&new_ka->sa.sa_mask, mask);
- return 0;
-}
-
#define to_user_ptr(p) ptr_to_compat(p)
#define from_user_ptr(p) compat_ptr(p)
@@ -196,21 +179,6 @@ static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
return copy_from_user(set, uset, sizeof(*uset));
}
-static inline int get_old_sigaction(struct k_sigaction *new_ka,
- struct old_sigaction __user *act)
-{
- old_sigset_t mask;
-
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka->sa.sa_restorer, &act->sa_restorer) ||
- __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
- __get_user(mask, &act->sa_mask))
- return -EFAULT;
- siginitset(&new_ka->sa.sa_mask, mask);
- return 0;
-}
-
#define to_user_ptr(p) ((unsigned long)(p))
#define from_user_ptr(p) ((void __user *)(p))
@@ -234,50 +202,8 @@ static inline int restore_general_regs(struct pt_regs *regs,
return -EFAULT;
return 0;
}
-
-#endif /* CONFIG_PPC64 */
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-long sys_sigsuspend(old_sigset_t mask)
-{
- sigset_t blocked;
- siginitset(&blocked, mask);
- return sigsuspend(&blocked);
-}
-
-long sys_sigaction(int sig, struct old_sigaction __user *act,
- struct old_sigaction __user *oact)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
-
-#ifdef CONFIG_PPC64
- if (sig < 0)
- sig = -sig;
#endif
- if (act) {
- if (get_old_sigaction(&new_ka, act))
- return -EFAULT;
- }
-
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
- if (!ret && oact) {
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(to_user_ptr(old_ka.sa.sa_handler),
- &oact->sa_handler) ||
- __put_user(to_user_ptr(old_ka.sa.sa_restorer),
- &oact->sa_restorer) ||
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
- return -EFAULT;
- }
-
- return ret;
-}
-
/*
* When we have signals to deliver, we set up on the
* user stack, going down from the original stack pointer:
@@ -293,6 +219,10 @@ long sys_sigaction(int sig, struct old_sigaction __user *act,
struct sigframe {
struct sigcontext sctx; /* the sigcontext */
struct mcontext mctx; /* all the register values */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ struct sigcontext sctx_transact;
+ struct mcontext mctx_transact;
+#endif
/*
* Programs using the rs6000/xcoff abi can save up to 19 gp
* regs and 18 fp regs below sp before decrementing it.
@@ -321,6 +251,9 @@ struct rt_sigframe {
struct siginfo info;
#endif
struct ucontext uc;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ struct ucontext uc_transact;
+#endif
/*
* Programs using the rs6000/xcoff abi can save up to 19 gp
* regs and 18 fp regs below sp before decrementing it.
@@ -381,6 +314,61 @@ unsigned long copy_vsx_from_user(struct task_struct *task,
task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return 0;
}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+unsigned long copy_transact_fpr_to_user(void __user *to,
+ struct task_struct *task)
+{
+ double buf[ELF_NFPREG];
+ int i;
+
+ /* save FPR copy to local buffer then write to the thread_struct */
+ for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+ buf[i] = task->thread.TS_TRANS_FPR(i);
+ memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double));
+ return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
+}
+
+unsigned long copy_transact_fpr_from_user(struct task_struct *task,
+ void __user *from)
+{
+ double buf[ELF_NFPREG];
+ int i;
+
+ if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
+ return 1;
+ for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+ task->thread.TS_TRANS_FPR(i) = buf[i];
+ memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double));
+
+ return 0;
+}
+
+unsigned long copy_transact_vsx_to_user(void __user *to,
+ struct task_struct *task)
+{
+ double buf[ELF_NVSRHALFREG];
+ int i;
+
+ /* save FPR copy to local buffer then write to the thread_struct */
+ for (i = 0; i < ELF_NVSRHALFREG; i++)
+ buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET];
+ return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
+}
+
+unsigned long copy_transact_vsx_from_user(struct task_struct *task,
+ void __user *from)
+{
+ double buf[ELF_NVSRHALFREG];
+ int i;
+
+ if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
+ return 1;
+ for (i = 0; i < ELF_NVSRHALFREG ; i++)
+ task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ return 0;
+}
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
inline unsigned long copy_fpr_to_user(void __user *to,
struct task_struct *task)
@@ -395,6 +383,22 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task,
return __copy_from_user(task->thread.fpr, from,
ELF_NFPREG * sizeof(double));
}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+inline unsigned long copy_transact_fpr_to_user(void __user *to,
+ struct task_struct *task)
+{
+ return __copy_to_user(to, task->thread.transact_fpr,
+ ELF_NFPREG * sizeof(double));
+}
+
+inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
+ void __user *from)
+{
+ return __copy_from_user(task->thread.transact_fpr, from,
+ ELF_NFPREG * sizeof(double));
+}
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif
/*
@@ -483,6 +487,150 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
return 0;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Save the current user registers on the user stack.
+ * We only save the altivec/spe registers if the process has used
+ * altivec/spe instructions at some point.
+ * We also save the transactional registers to a second ucontext in the
+ * frame.
+ *
+ * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
+ */
+static int save_tm_user_regs(struct pt_regs *regs,
+ struct mcontext __user *frame,
+ struct mcontext __user *tm_frame, int sigret)
+{
+ unsigned long msr = regs->msr;
+
+ /* Make sure floating point registers are stored in regs */
+ flush_fp_to_thread(current);
+
+ /* Save both sets of general registers */
+ if (save_general_regs(&current->thread.ckpt_regs, frame)
+ || save_general_regs(regs, tm_frame))
+ return 1;
+
+ /* Stash the top half of the 64bit MSR into the 32bit MSR word
+ * of the transactional mcontext. This way we have a backward-compatible
+ * MSR in the 'normal' (checkpointed) mcontext and additionally one can
+ * also look at what type of transaction (T or S) was active at the
+ * time of the signal.
+ */
+ if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
+ return 1;
+
+#ifdef CONFIG_ALTIVEC
+ /* save altivec registers */
+ if (current->thread.used_vr) {
+ flush_altivec_to_thread(current);
+ if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+ ELF_NVRREG * sizeof(vector128)))
+ return 1;
+ if (msr & MSR_VEC) {
+ if (__copy_to_user(&tm_frame->mc_vregs,
+ current->thread.transact_vr,
+ ELF_NVRREG * sizeof(vector128)))
+ return 1;
+ } else {
+ if (__copy_to_user(&tm_frame->mc_vregs,
+ current->thread.vr,
+ ELF_NVRREG * sizeof(vector128)))
+ return 1;
+ }
+
+ /* set MSR_VEC in the saved MSR value to indicate that
+ * frame->mc_vregs contains valid data
+ */
+ msr |= MSR_VEC;
+ }
+
+ /* We always copy to/from vrsave, it's 0 if we don't have or don't
+ * use altivec. Since VSCR only contains 32 bits saved in the least
+ * significant bits of a vector, we "cheat" and stuff VRSAVE in the
+ * most significant bits of that same vector. --BenH
+ */
+ if (__put_user(current->thread.vrsave,
+ (u32 __user *)&frame->mc_vregs[32]))
+ return 1;
+ if (msr & MSR_VEC) {
+ if (__put_user(current->thread.transact_vrsave,
+ (u32 __user *)&tm_frame->mc_vregs[32]))
+ return 1;
+ } else {
+ if (__put_user(current->thread.vrsave,
+ (u32 __user *)&tm_frame->mc_vregs[32]))
+ return 1;
+ }
+#endif /* CONFIG_ALTIVEC */
+
+ if (copy_fpr_to_user(&frame->mc_fregs, current))
+ return 1;
+ if (msr & MSR_FP) {
+ if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current))
+ return 1;
+ } else {
+ if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
+ return 1;
+ }
+
+#ifdef CONFIG_VSX
+ /*
+ * Copy VSR 0-31 upper half from thread_struct to local
+ * buffer, then write that to userspace. Also set MSR_VSX in
+ * the saved MSR value to indicate that frame->mc_vsregs
+ * contains valid data
+ */
+ if (current->thread.used_vsr) {
+ __giveup_vsx(current);
+ if (copy_vsx_to_user(&frame->mc_vsregs, current))
+ return 1;
+ if (msr & MSR_VSX) {
+ if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs,
+ current))
+ return 1;
+ } else {
+ if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
+ return 1;
+ }
+
+ msr |= MSR_VSX;
+ }
+#endif /* CONFIG_VSX */
+#ifdef CONFIG_SPE
+ /* SPE regs are not checkpointed with TM, so this section is
+ * simply the same as in save_user_regs().
+ */
+ if (current->thread.used_spe) {
+ flush_spe_to_thread(current);
+ if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
+ ELF_NEVRREG * sizeof(u32)))
+ return 1;
+ /* set MSR_SPE in the saved MSR value to indicate that
+ * frame->mc_vregs contains valid data */
+ msr |= MSR_SPE;
+ }
+
+ /* We always copy to/from spefscr */
+ if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
+ return 1;
+#endif /* CONFIG_SPE */
+
+ if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
+ return 1;
+ if (sigret) {
+ /* Set up the sigreturn trampoline: li r0,sigret; sc */
+ if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
+ || __put_user(0x44000002UL, &frame->tramp[1]))
+ return 1;
+ flush_icache_range((unsigned long) &frame->tramp[0],
+ (unsigned long) &frame->tramp[2]);
+ }
+
+ return 0;
+}
+#endif
+
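For reference, the two trampoline words written above decode to the classic sigreturn stub: 0x38000000 | sigret is addi r0,0,sigret (the li r0,sigret mnemonic) and 0x44000002 is sc. The same construction pulled out as a standalone sketch; the function name write_sigreturn_tramp is illustrative:

static int write_sigreturn_tramp(u32 __user *tramp, int sigret)
{
	/* li r0,sigret : opcode 14 (addi) with rD = 0, rA = 0, SIMM = sigret */
	if (__put_user(0x38000000UL + sigret, &tramp[0]))
		return 1;
	/* sc : system call */
	if (__put_user(0x44000002UL, &tramp[1]))
		return 1;
	/* make the freshly written instructions visible to instruction fetch */
	flush_icache_range((unsigned long)&tramp[0], (unsigned long)&tramp[2]);
	return 0;
}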
/*
* Restore the current user register values from the user stack,
* (except for MSR).
@@ -588,90 +736,142 @@ static long restore_user_regs(struct pt_regs *regs,
return 0;
}
-#ifdef CONFIG_PPC64
-long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
- struct sigaction32 __user *oact, size_t sigsetsize)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Restore the current user register values from the user stack, except for
+ * MSR, and recheckpoint the original checkpointed register state for processes
+ * in transactions.
+ */
+static long restore_tm_user_regs(struct pt_regs *regs,
+ struct mcontext __user *sr,
+ struct mcontext __user *tm_sr)
{
- struct k_sigaction new_ka, old_ka;
- int ret;
+ long err;
+ unsigned long msr;
+#ifdef CONFIG_VSX
+ int i;
+#endif
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(compat_sigset_t))
- return -EINVAL;
+ /*
+ * restore general registers but not including MSR or SOFTE. Also
+ * take care of keeping r2 (TLS) intact if not a signal.
+ * See comment in signal_64.c:restore_tm_sigcontexts();
+ * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
+ * were set by the signal delivery.
+ */
+ err = restore_general_regs(regs, tm_sr);
+ err |= restore_general_regs(&current->thread.ckpt_regs, sr);
- if (act) {
- compat_uptr_t handler;
+ err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
- ret = get_user(handler, &act->sa_handler);
- new_ka.sa.sa_handler = compat_ptr(handler);
- ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
- ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- if (ret)
- return -EFAULT;
- }
+ err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
+ if (err)
+ return 1;
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
- if (!ret && oact) {
- ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
- ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
- ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- }
- return ret;
-}
+ /* Restore the previous little-endian mode */
+ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
-/*
- * Note: it is necessary to treat how as an unsigned int, with the
- * corresponding cast to a signed int to insure that the proper
- * conversion (sign extension) between the register representation
- * of a signed int (msr in 32-bit mode) and the register representation
- * of a signed int (msr in 64-bit mode) is performed.
- */
-long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
- compat_sigset_t __user *oset, size_t sigsetsize)
-{
- sigset_t s;
- sigset_t __user *up;
- int ret;
- mm_segment_t old_fs = get_fs();
+ /*
+ * Do this before updating the thread state in
+ * current->thread.fpr/vr/evr. That way, if we get preempted
+ * and another task grabs the FPU/Altivec/SPE, it won't be
+ * tempted to save the current CPU state into the thread_struct
+ * and corrupt what we are writing there.
+ */
+ discard_lazy_cpu_state();
- if (set) {
- if (get_sigset_t(&s, set))
- return -EFAULT;
+#ifdef CONFIG_ALTIVEC
+ regs->msr &= ~MSR_VEC;
+ if (msr & MSR_VEC) {
+ /* restore altivec registers from the stack */
+ if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+ sizeof(sr->mc_vregs)) ||
+ __copy_from_user(current->thread.transact_vr,
+ &tm_sr->mc_vregs,
+ sizeof(sr->mc_vregs)))
+ return 1;
+ } else if (current->thread.used_vr) {
+ memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
+ memset(current->thread.transact_vr, 0,
+ ELF_NVRREG * sizeof(vector128));
}
- set_fs(KERNEL_DS);
- /* This is valid because of the set_fs() */
- up = (sigset_t __user *) &s;
- ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
- sigsetsize);
- set_fs(old_fs);
- if (ret)
- return ret;
- if (oset) {
- if (put_sigset_t(oset, &s))
- return -EFAULT;
- }
- return 0;
-}
+ /* Always get VRSAVE back */
+ if (__get_user(current->thread.vrsave,
+ (u32 __user *)&sr->mc_vregs[32]) ||
+ __get_user(current->thread.transact_vrsave,
+ (u32 __user *)&tm_sr->mc_vregs[32]))
+ return 1;
+#endif /* CONFIG_ALTIVEC */
-long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
-{
- sigset_t s;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- /* The __user pointer cast is valid because of the set_fs() */
- ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
- set_fs(old_fs);
- if (!ret) {
- if (put_sigset_t(set, &s))
- return -EFAULT;
+ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+
+ if (copy_fpr_from_user(current, &sr->mc_fregs) ||
+ copy_transact_fpr_from_user(current, &tm_sr->mc_fregs))
+ return 1;
+
+#ifdef CONFIG_VSX
+ regs->msr &= ~MSR_VSX;
+ if (msr & MSR_VSX) {
+ /*
+ * Restore altivec registers from the stack to a local
+ * buffer, then write this out to the thread_struct
+ */
+ if (copy_vsx_from_user(current, &sr->mc_vsregs) ||
+ copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs))
+ return 1;
+ } else if (current->thread.used_vsr)
+ for (i = 0; i < 32 ; i++) {
+ current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+ }
+#endif /* CONFIG_VSX */
+
+#ifdef CONFIG_SPE
+ /* SPE regs are not checkpointed with TM, so this section is
+ * simply the same as in restore_user_regs().
+ */
+ regs->msr &= ~MSR_SPE;
+ if (msr & MSR_SPE) {
+ if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
+ ELF_NEVRREG * sizeof(u32)))
+ return 1;
+ } else if (current->thread.used_spe)
+ memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
+
+ /* Always get SPEFSCR back */
+ if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
+ + ELF_NEVRREG))
+ return 1;
+#endif /* CONFIG_SPE */
+
+ /* Now, recheckpoint. This loads up all of the checkpointed (older)
+ * registers, including FP and V[S]Rs. After recheckpointing, the
+ * transactional versions should be loaded.
+ */
+ tm_enable();
+ /* This loads the checkpointed FP/VEC state, if used */
+ tm_recheckpoint(&current->thread, msr);
+ /* The task has moved into TM state S, so ensure MSR reflects this */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;
+
+ /* This loads the speculative FP/VEC state, if used */
+ if (msr & MSR_FP) {
+ do_load_up_transact_fpu(&current->thread);
+ regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
- return ret;
-}
+#ifdef CONFIG_ALTIVEC
+ if (msr & MSR_VEC) {
+ do_load_up_transact_altivec(&current->thread);
+ regs->msr |= MSR_VEC;
+ }
+#endif
+ return 0;
+}
+#endif
+#ifdef CONFIG_PPC64
int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
{
int err;
@@ -740,79 +940,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
return 0;
}
-
-/*
- * Note: it is necessary to treat pid and sig as unsigned ints, with the
- * corresponding cast to a signed int to insure that the proper conversion
- * (sign extension) between the register representation of a signed int
- * (msr in 32-bit mode) and the register representation of a signed int
- * (msr in 64-bit mode) is performed.
- */
-long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
-{
- siginfo_t info;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- ret = copy_siginfo_from_user32(&info, uinfo);
- if (unlikely(ret))
- return ret;
-
- set_fs (KERNEL_DS);
- /* The __user pointer cast is valid becasuse of the set_fs() */
- ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
- set_fs (old_fs);
- return ret;
-}
-/*
- * Start Alternate signal stack support
- *
- * System Calls
- * sigaltatck compat_sys_sigaltstack
- */
-
-int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
- int r6, int r7, int r8, struct pt_regs *regs)
-{
- stack_32_t __user * newstack = compat_ptr(__new);
- stack_32_t __user * oldstack = compat_ptr(__old);
- stack_t uss, uoss;
- int ret;
- mm_segment_t old_fs;
- unsigned long sp;
- compat_uptr_t ss_sp;
-
- /*
- * set sp to the user stack on entry to the system call
- * the system call router sets R9 to the saved registers
- */
- sp = regs->gpr[1];
-
- /* Put new stack info in local 64 bit stack struct */
- if (newstack) {
- if (get_user(ss_sp, &newstack->ss_sp) ||
- __get_user(uss.ss_flags, &newstack->ss_flags) ||
- __get_user(uss.ss_size, &newstack->ss_size))
- return -EFAULT;
- uss.ss_sp = compat_ptr(ss_sp);
- }
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- /* The __user pointer casts are valid because of the set_fs() */
- ret = do_sigaltstack(
- newstack ? (stack_t __user *) &uss : NULL,
- oldstack ? (stack_t __user *) &uoss : NULL,
- sp);
- set_fs(old_fs);
- /* Copy the stack information to the user output buffer */
- if (!ret && oldstack &&
- (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
- __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
- __put_user(uoss.ss_size, &oldstack->ss_size)))
- return -EFAULT;
- return ret;
-}
#endif /* CONFIG_PPC64 */
/*
@@ -827,10 +954,12 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
struct mcontext __user *frame;
void __user *addr;
unsigned long newsp = 0;
+ int sigret;
+ unsigned long tramp;
/* Set up Signal Frame */
/* Put a Real Time Context onto stack */
- rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
+ rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
addr = rt_sf;
if (unlikely(rt_sf == NULL))
goto badframe;
@@ -838,11 +967,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
/* Put the siginfo & fill in most of the ucontext */
if (copy_siginfo_to_user(&rt_sf->info, info)
|| __put_user(0, &rt_sf->uc.uc_flags)
- || __put_user(0, &rt_sf->uc.uc_link)
- || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
- || __put_user(sas_ss_flags(regs->gpr[1]),
- &rt_sf->uc.uc_stack.ss_flags)
- || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
+ || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
|| __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
&rt_sf->uc.uc_regs)
|| put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
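The generic __save_altstack() helper used above covers exactly what the three open-coded __put_user() calls it replaces did; spelled out, a sketch of the equivalent using the same fields the removed lines touched:

	if (__put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
	    || __put_user(sas_ss_flags(regs->gpr[1]),
			  &rt_sf->uc.uc_stack.ss_flags)
	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size))
		goto badframe;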
@@ -852,14 +977,37 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
frame = &rt_sf->uc.uc_mcontext;
addr = frame;
if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
- if (save_user_regs(regs, frame, 0, 1))
- goto badframe;
- regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+ sigret = 0;
+ tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
} else {
- if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1))
+ sigret = __NR_rt_sigreturn;
+ tramp = (unsigned long) frame->tramp;
+ }
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(regs->msr)) {
+ if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext,
+ &rt_sf->uc_transact.uc_mcontext, sigret))
+ goto badframe;
+ }
+ else
+#endif
+ if (save_user_regs(regs, frame, sigret, 1))
+ goto badframe;
+ regs->link = tramp;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(regs->msr)) {
+ if (__put_user((unsigned long)&rt_sf->uc_transact,
+ &rt_sf->uc.uc_link)
+ || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext),
+ &rt_sf->uc_transact.uc_regs))
goto badframe;
- regs->link = (unsigned long) frame->tramp;
}
+ else
+#endif
+ if (__put_user(0, &rt_sf->uc.uc_link))
+ goto badframe;
current->thread.fpscr.val = 0; /* turn off all fp exceptions */
@@ -878,6 +1026,13 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* Remove TM bits from thread's MSR. The MSR in the sigcontext
+ * just indicates to userland that we were doing a transaction, but we
+ * don't want to return in transactional state:
+ */
+ regs->msr &= ~MSR_TS_MASK;
+#endif
return 1;
badframe:
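Seen from userland, the frame set up above keeps uc.uc_link NULL for an ordinary signal, and points it at the second (transactional) ucontext when the task was in a transaction. A hedged userspace sketch, assuming the glibc ppc32 ucontext layout where uc_mcontext.uc_regs points at the mcontext and PT_MSR comes from <asm/ptrace.h>; the handler name is hypothetical:

#include <signal.h>
#include <ucontext.h>
#include <asm/ptrace.h>

static void tm_aware_handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;			/* checkpointed state */
	ucontext_t *uc_tm = uc->uc_link;	/* transactional state, or NULL */

	if (uc_tm) {
		/* high word of the 64-bit MSR stashed by save_tm_user_regs() */
		unsigned long msr_hi =
			uc_tm->uc_mcontext.uc_regs->gregs[PT_MSR];
		(void)msr_hi;	/* e.g. inspect the TS bits here */
	}
}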
@@ -925,6 +1080,35 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
return 0;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static int do_setcontext_tm(struct ucontext __user *ucp,
+ struct ucontext __user *tm_ucp,
+ struct pt_regs *regs)
+{
+ sigset_t set;
+ struct mcontext __user *mcp;
+ struct mcontext __user *tm_mcp;
+ u32 cmcp;
+ u32 tm_cmcp;
+
+ if (get_sigset_t(&set, &ucp->uc_sigmask))
+ return -EFAULT;
+
+ if (__get_user(cmcp, &ucp->uc_regs) ||
+ __get_user(tm_cmcp, &tm_ucp->uc_regs))
+ return -EFAULT;
+ mcp = (struct mcontext __user *)(u64)cmcp;
+ tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
+ /* no need to check access_ok(mcp), since mcp < 4GB */
+
+ set_current_blocked(&set);
+ if (restore_tm_user_regs(regs, mcp, tm_mcp))
+ return -EFAULT;
+
+ return 0;
+}
+#endif
+
long sys_swapcontext(struct ucontext __user *old_ctx,
struct ucontext __user *new_ctx,
int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
@@ -1020,7 +1204,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
struct pt_regs *regs)
{
struct rt_sigframe __user *rt_sf;
-
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ struct ucontext __user *uc_transact;
+ unsigned long msr_hi;
+ unsigned long tmp;
+ int tm_restore = 0;
+#endif
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -1028,6 +1217,34 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
goto bad;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (__get_user(tmp, &rt_sf->uc.uc_link))
+ goto bad;
+ uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
+ if (uc_transact) {
+ u32 cmcp;
+ struct mcontext __user *mcp;
+
+ if (__get_user(cmcp, &uc_transact->uc_regs))
+ return -EFAULT;
+ mcp = (struct mcontext __user *)(u64)cmcp;
+ /* The top 32 bits of the MSR are stashed in the transactional
+ * ucontext. */
+ if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
+ goto bad;
+
+ if (MSR_TM_SUSPENDED(msr_hi<<32)) {
+ /* We only recheckpoint on return if we're
+ * in a transaction.
+ */
+ tm_restore = 1;
+ if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
+ goto bad;
+ }
+ }
+ if (!tm_restore)
+ /* Fall through, for non-TM restore */
+#endif
if (do_setcontext(&rt_sf->uc, regs, 1))
goto bad;
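The 32-bit frame splits the 64-bit MSR across the two mcontexts: the low word sits in the checkpointed mcontext (where restore_tm_user_regs() reads the LE bit from) and the high word, carrying the TS field, in the transactional one. MSR_TM_SUSPENDED() only looks at the high bits, which is why msr_hi << 32 above is enough. An illustrative helper showing the recombination; the name frame_msr_was_suspended is hypothetical:

static inline int frame_msr_was_suspended(u32 msr_hi, u32 msr_lo)
{
	u64 msr = ((u64)msr_hi << 32) | msr_lo;	/* rebuild the 64-bit MSR */

	return MSR_TM_SUSPENDED(msr);	/* TS field lives in the high word */
}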
@@ -1039,14 +1256,11 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
* change it. -- paulus
*/
#ifdef CONFIG_PPC64
- /*
- * We use the compat_sys_ version that does the 32/64 bits conversion
- * and takes userland pointer directly. What about error checking ?
- * nobody does any...
- */
- compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
+ if (compat_restore_altstack(&rt_sf->uc.uc_stack))
+ goto bad;
#else
- do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
+ if (restore_altstack(&rt_sf->uc.uc_stack))
+ goto bad;
#endif
set_thread_flag(TIF_RESTOREALL);
return 0;
@@ -1162,7 +1376,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
* always done it up until now so it is probably better not to
* change it. -- paulus
*/
- do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
+ restore_altstack(&ctx->uc_stack);
set_thread_flag(TIF_RESTOREALL);
out:
@@ -1179,9 +1393,11 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
struct sigcontext __user *sc;
struct sigframe __user *frame;
unsigned long newsp = 0;
+ int sigret;
+ unsigned long tramp;
/* Set up Signal Frame */
- frame = get_sigframe(ka, regs, sizeof(*frame), 1);
+ frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
if (unlikely(frame == NULL))
goto badframe;
sc = (struct sigcontext __user *) &frame->sctx;
@@ -1201,14 +1417,25 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
goto badframe;
if (vdso32_sigtramp && current->mm->context.vdso_base) {
- if (save_user_regs(regs, &frame->mctx, 0, 1))
- goto badframe;
- regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
+ sigret = 0;
+ tramp = current->mm->context.vdso_base + vdso32_sigtramp;
} else {
- if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1))
+ sigret = __NR_sigreturn;
+ tramp = (unsigned long) frame->mctx.tramp;
+ }
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(regs->msr)) {
+ if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
+ sigret))
goto badframe;
- regs->link = (unsigned long) frame->mctx.tramp;
}
+ else
+#endif
+ if (save_user_regs(regs, &frame->mctx, sigret, 1))
+ goto badframe;
+
+ regs->link = tramp;
current->thread.fpscr.val = 0; /* turn off all fp exceptions */
@@ -1223,7 +1450,13 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
-
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* Remove TM bits from thread's MSR. The MSR in the sigcontext
+ * just indicates to userland that we were doing a transaction, but we
+ * don't want to return in transactional state:
+ */
+ regs->msr &= ~MSR_TS_MASK;
+#endif
return 1;
badframe:
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 1ca045d..3459473 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -34,6 +34,7 @@
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
+#include <asm/tm.h>
#include "signal.h"
@@ -56,6 +57,9 @@
struct rt_sigframe {
/* sys_rt_sigreturn requires the ucontext be the first field */
struct ucontext uc;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ struct ucontext uc_transact;
+#endif
unsigned long _unused[2];
unsigned int tramp[TRAMP_SIZE];
struct siginfo __user *pinfo;
@@ -145,6 +149,136 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
return err;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * As above, but Transactional Memory is in use, so deliver sigcontexts
+ * containing checkpointed and transactional register states.
+ *
+ * To do this, we treclaim (done before entering here) to gather both sets of
+ * registers and set up the 'normal' sigcontext registers with rolled-back
+ * register values such that a simple signal handler sees a correct
+ * checkpointed register state. If interested, a TM-aware sighandler can
+ * examine the transactional registers in the 2nd sigcontext to determine the
+ * real origin of the signal.
+ */
+static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ struct sigcontext __user *tm_sc,
+ struct pt_regs *regs,
+ int signr, sigset_t *set, unsigned long handler)
+{
+ /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
+ * process never used altivec yet (MSR_VEC is zero in pt_regs of
+ * the context). This is very important because we must ensure we
+ * don't lose the VRSAVE content that may have been set prior to
+ * the process doing its first vector operation
+ * Userland shall check AT_HWCAP to know whether it can rely on the
+ * v_regs pointer or not.
+ */
+#ifdef CONFIG_ALTIVEC
+ elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)
+ (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
+ elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *)
+ (((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful);
+#endif
+ unsigned long msr = regs->msr;
+ long err = 0;
+
+ BUG_ON(!MSR_TM_ACTIVE(regs->msr));
+
+ flush_fp_to_thread(current);
+
+#ifdef CONFIG_ALTIVEC
+ err |= __put_user(v_regs, &sc->v_regs);
+ err |= __put_user(tm_v_regs, &tm_sc->v_regs);
+
+ /* save altivec registers */
+ if (current->thread.used_vr) {
+ flush_altivec_to_thread(current);
+ /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
+ err |= __copy_to_user(v_regs, current->thread.vr,
+ 33 * sizeof(vector128));
+ /* If VEC was enabled there are transactional VRs valid too,
+ * else they're a copy of the checkpointed VRs.
+ */
+ if (msr & MSR_VEC)
+ err |= __copy_to_user(tm_v_regs,
+ current->thread.transact_vr,
+ 33 * sizeof(vector128));
+ else
+ err |= __copy_to_user(tm_v_regs,
+ current->thread.vr,
+ 33 * sizeof(vector128));
+
+ /* set MSR_VEC in the MSR value in the frame to indicate
+ * that sc->v_regs contains valid data.
+ */
+ msr |= MSR_VEC;
+ }
+ /* We always copy to/from vrsave, it's 0 if we don't have or don't
+ * use altivec.
+ */
+ err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
+ if (msr & MSR_VEC)
+ err |= __put_user(current->thread.transact_vrsave,
+ (u32 __user *)&tm_v_regs[33]);
+ else
+ err |= __put_user(current->thread.vrsave,
+ (u32 __user *)&tm_v_regs[33]);
+
+#else /* CONFIG_ALTIVEC */
+ err |= __put_user(0, &sc->v_regs);
+ err |= __put_user(0, &tm_sc->v_regs);
+#endif /* CONFIG_ALTIVEC */
+
+ /* copy fpr regs and fpscr */
+ err |= copy_fpr_to_user(&sc->fp_regs, current);
+ if (msr & MSR_FP)
+ err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current);
+ else
+ err |= copy_fpr_to_user(&tm_sc->fp_regs, current);
+
+#ifdef CONFIG_VSX
+ /*
+ * Copy VSX low doubleword to local buffer for formatting,
+ * then out to userspace. Update v_regs to point after the
+ * VMX data.
+ */
+ if (current->thread.used_vsr) {
+ __giveup_vsx(current);
+ v_regs += ELF_NVRREG;
+ tm_v_regs += ELF_NVRREG;
+
+ err |= copy_vsx_to_user(v_regs, current);
+
+ if (msr & MSR_VSX)
+ err |= copy_transact_vsx_to_user(tm_v_regs, current);
+ else
+ err |= copy_vsx_to_user(tm_v_regs, current);
+
+ /* set MSR_VSX in the MSR value in the frame to
+ * indicate that the frame's VSX register area contains valid data.
+ */
+ msr |= MSR_VSX;
+ }
+#endif /* CONFIG_VSX */
+
+ err |= __put_user(&sc->gp_regs, &sc->regs);
+ err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
+ WARN_ON(!FULL_REGS(regs));
+ err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
+ err |= __copy_to_user(&sc->gp_regs,
+ &current->thread.ckpt_regs, GP_REGS_SIZE);
+ err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
+ err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
+ err |= __put_user(signr, &sc->signal);
+ err |= __put_user(handler, &sc->handler);
+ if (set != NULL)
+ err |= __put_user(set->sig[0], &sc->oldmask);
+
+ return err;
+}
+#endif
+
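From a 64-bit handler's perspective the result follows the same uc_link convention as on 32-bit: uc carries the checkpointed (rolled-back) registers, and uc->uc_link, when non-NULL, points at the transactional ucontext filled in by setup_tm_sigcontexts(). A hedged userspace sketch, assuming the glibc ppc64 layout where the GPRs sit in uc_mcontext.gp_regs and PT_NIP comes from <asm/ptrace.h>; the handler name is hypothetical:

#include <signal.h>
#include <ucontext.h>
#include <asm/ptrace.h>

static void tm_aware_handler64(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;			/* checkpointed state */
	ucontext_t *uc_tm = uc->uc_link;	/* transactional state, or NULL */

	if (uc_tm) {
		/* where the transaction actually was when the signal hit */
		unsigned long tm_nip = uc_tm->uc_mcontext.gp_regs[PT_NIP];
		/* checkpointed NIP, i.e. where the transaction will restart */
		unsigned long ckpt_nip = uc->uc_mcontext.gp_regs[PT_NIP];
		(void)tm_nip;
		(void)ckpt_nip;
	}
}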
/*
* Restore the sigcontext from the signal frame.
*/
@@ -241,6 +375,155 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
return err;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Restore the two sigcontexts from the frame of a transactional process.
+ */
+
+static long restore_tm_sigcontexts(struct pt_regs *regs,
+ struct sigcontext __user *sc,
+ struct sigcontext __user *tm_sc)
+{
+#ifdef CONFIG_ALTIVEC
+ elf_vrreg_t __user *v_regs, *tm_v_regs;
+#endif
+ unsigned long err = 0;
+ unsigned long msr;
+#ifdef CONFIG_VSX
+ int i;
+#endif
+ /* copy the GPRs */
+ err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
+ err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs,
+ sizeof(regs->gpr));
+
+ /*
+ * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
+ * TEXASR was set by the signal delivery reclaim, as was TFIAR.
+ * Users doing anything abhorrent like thread-switching w/ signals for
+ * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
+ * For the case of getting a signal and simply returning from it,
+ * we don't need to re-copy them here.
+ */
+ err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
+ err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);
+
+ /* get MSR separately, transfer the LE bit if doing signal return */
+ err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+
+ /* The following non-GPR non-FPR non-VR state is also checkpointed: */
+ err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
+ err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
+ err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
+ err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
+ err |= __get_user(current->thread.ckpt_regs.ctr,
+ &sc->gp_regs[PT_CTR]);
+ err |= __get_user(current->thread.ckpt_regs.link,
+ &sc->gp_regs[PT_LNK]);
+ err |= __get_user(current->thread.ckpt_regs.xer,
+ &sc->gp_regs[PT_XER]);
+ err |= __get_user(current->thread.ckpt_regs.ccr,
+ &sc->gp_regs[PT_CCR]);
+
+ /* These regs are not checkpointed; they can go in 'regs'. */
+ err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
+ err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
+ err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
+ err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
+
+ /*
+ * Do this before updating the thread state in
+ * current->thread.fpr/vr. That way, if we get preempted
+ * and another task grabs the FPU/Altivec, it won't be
+ * tempted to save the current CPU state into the thread_struct
+ * and corrupt what we are writing there.
+ */
+ discard_lazy_cpu_state();
+
+ /*
+ * Force reload of FP/VEC.
+ * This has to be done before copying stuff into current->thread.fpr/vr
+ * for the reasons explained in the previous comment.
+ */
+ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+
+#ifdef CONFIG_ALTIVEC
+ err |= __get_user(v_regs, &sc->v_regs);
+ err |= __get_user(tm_v_regs, &tm_sc->v_regs);
+ if (err)
+ return err;
+ if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
+ return -EFAULT;
+ if (tm_v_regs && !access_ok(VERIFY_READ,
+ tm_v_regs, 34 * sizeof(vector128)))
+ return -EFAULT;
+ /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
+ if (v_regs != 0 && tm_v_regs != 0 && (msr & MSR_VEC) != 0) {
+ err |= __copy_from_user(current->thread.vr, v_regs,
+ 33 * sizeof(vector128));
+ err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
+ 33 * sizeof(vector128));
+ }
+ else if (current->thread.used_vr) {
+ memset(current->thread.vr, 0, 33 * sizeof(vector128));
+ memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
+ }
+ /* Always get VRSAVE back */
+ if (v_regs != 0 && tm_v_regs != 0) {
+ err |= __get_user(current->thread.vrsave,
+ (u32 __user *)&v_regs[33]);
+ err |= __get_user(current->thread.transact_vrsave,
+ (u32 __user *)&tm_v_regs[33]);
+ }
+ else {
+ current->thread.vrsave = 0;
+ current->thread.transact_vrsave = 0;
+ }
+#endif /* CONFIG_ALTIVEC */
+ /* restore floating point */
+ err |= copy_fpr_from_user(current, &sc->fp_regs);
+ err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs);
+#ifdef CONFIG_VSX
+ /*
+ * Get additional VSX data. Update v_regs to point after the
+ * VMX data. Copy VSX low doubleword from userspace to local
+ * buffer for formatting, then into the taskstruct.
+ */
+ if (v_regs && ((msr & MSR_VSX) != 0)) {
+ v_regs += ELF_NVRREG;
+ tm_v_regs += ELF_NVRREG;
+ err |= copy_vsx_from_user(current, v_regs);
+ err |= copy_transact_vsx_from_user(current, tm_v_regs);
+ } else {
+ for (i = 0; i < 32 ; i++) {
+ current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+ current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+ }
+ }
+#endif
+ tm_enable();
+ /* This loads the checkpointed FP/VEC state, if used */
+ tm_recheckpoint(&current->thread, msr);
+ /* The task has moved into TM state S, so ensure MSR reflects this: */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33);
+
+ /* This loads the speculative FP/VEC state, if used */
+ if (msr & MSR_FP) {
+ do_load_up_transact_fpu(&current->thread);
+ regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+ }
+#ifdef CONFIG_ALTIVEC
+ if (msr & MSR_VEC) {
+ do_load_up_transact_altivec(&current->thread);
+ regs->msr |= MSR_VEC;
+ }
+#endif
+
+ return err;
+}
+#endif
+
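One detail worth spelling out: the __MASK(33) used when forcing the task into suspended state is the same bit the 32-bit path writes symbolically. Assuming the MSR bit numbering in this tree's asm/reg.h, where bit 33 is the TS "suspended" bit, the statement above is equivalent to:

	/* equivalent, with the named constant, matching what the 32-bit
	 * restore_tm_user_regs() writes */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;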
/*
* Setup the trampoline code on the stack
*/
@@ -355,6 +638,9 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
{
struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
sigset_t set;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ unsigned long msr;
+#endif
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -365,13 +651,26 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
+ goto badframe;
+ if (MSR_TM_SUSPENDED(msr)) {
+ /* We recheckpoint on return. */
+ struct ucontext __user *uc_transact;
+ if (__get_user(uc_transact, &uc->uc_link))
+ goto badframe;
+ if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
+ &uc_transact->uc_mcontext))
+ goto badframe;
+ }
+ else
+ /* Fall through, for non-TM restore */
+#endif
if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
goto badframe;
- /* do_sigaltstack expects a __user pointer and won't modify
- * what's in there anyway
- */
- do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]);
+ if (restore_altstack(&uc->uc_stack))
+ goto badframe;
set_thread_flag(TIF_RESTOREALL);
return 0;
@@ -403,7 +702,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
unsigned long newsp = 0;
long err = 0;
- frame = get_sigframe(ka, regs, sizeof(*frame), 0);
+ frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0);
if (unlikely(frame == NULL))
goto badframe;
@@ -415,19 +714,39 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->gpr[1]),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr, NULL,
- (unsigned long)ka->sa.sa_handler, 1);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(regs->msr)) {
+ /* The ucontext_t passed to userland points to the second
+ * ucontext_t (for transactional state) with its uc_link ptr.
+ * ucontext_t (for transactional state) via its uc_link ptr.
+ err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
+ err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
+ &frame->uc_transact.uc_mcontext,
+ regs, signr,
+ NULL,
+ (unsigned long)ka->sa.sa_handler);
+ } else
+#endif
+ {
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr,
+ NULL, (unsigned long)ka->sa.sa_handler,
+ 1);
+ }
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
goto badframe;
/* Make sure signal handler doesn't get spurious FP exceptions */
current->thread.fpscr.val = 0;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* Remove TM bits from thread's MSR. The MSR in the sigcontext
+ * just indicates to userland that we were doing a transaction, but we
+ * don't want to return in transactional state:
+ */
+ regs->msr &= ~MSR_TS_MASK;
+#endif
/* Set up to return from userspace. */
if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e6f5d08..93df71b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -650,7 +650,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
}
/* Activate a secondary processor. */
-void start_secondary(void *unused)
+__cpuinit void start_secondary(void *unused)
{
unsigned int cpu = smp_processor_id();
struct device_node *l2_cache;
@@ -709,7 +709,7 @@ void start_secondary(void *unused)
local_irq_enable();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
BUG();
}
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 8a93778..8a28587 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -61,404 +61,6 @@ asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
return compat_sys_select((int)n, inp, outp, exp, compat_ptr(tvp_x));
}
-/* Note: it is necessary to treat option as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
-{
- return sys_sysfs((int)option, arg1, arg2);
-}
-
-#ifdef CONFIG_SYSVIPC
-long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
- u32 fifth)
-{
- int version;
-
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
-
- switch (call) {
-
- case SEMTIMEDOP:
- if (fifth)
- /* sign extend semid */
- return compat_sys_semtimedop((int)first,
- compat_ptr(ptr), second,
- compat_ptr(fifth));
- /* else fall through for normal semop() */
- case SEMOP:
- /* struct sembuf is the same on 32 and 64bit :)) */
- /* sign extend semid */
- return sys_semtimedop((int)first, compat_ptr(ptr), second,
- NULL);
- case SEMGET:
- /* sign extend key, nsems */
- return sys_semget((int)first, (int)second, third);
- case SEMCTL:
- /* sign extend semid, semnum */
- return compat_sys_semctl((int)first, (int)second, third,
- compat_ptr(ptr));
-
- case MSGSND:
- /* sign extend msqid */
- return compat_sys_msgsnd((int)first, (int)second, third,
- compat_ptr(ptr));
- case MSGRCV:
- /* sign extend msqid, msgtyp */
- return compat_sys_msgrcv((int)first, second, (int)fifth,
- third, version, compat_ptr(ptr));
- case MSGGET:
- /* sign extend key */
- return sys_msgget((int)first, second);
- case MSGCTL:
- /* sign extend msqid */
- return compat_sys_msgctl((int)first, second, compat_ptr(ptr));
-
- case SHMAT:
- /* sign extend shmid */
- return compat_sys_shmat((int)first, second, third, version,
- compat_ptr(ptr));
- case SHMDT:
- return sys_shmdt(compat_ptr(ptr));
- case SHMGET:
- /* sign extend key_t */
- return sys_shmget((int)first, second, third);
- case SHMCTL:
- /* sign extend shmid */
- return compat_sys_shmctl((int)first, second, compat_ptr(ptr));
-
- default:
- return -ENOSYS;
- }
-
- return -ENOSYS;
-}
-#endif
-
-/* Note: it is necessary to treat out_fd and in_fd as unsigned ints,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_sendfile_wrapper(u32 out_fd, u32 in_fd,
- compat_off_t __user *offset, u32 count)
-{
- return compat_sys_sendfile((int)out_fd, (int)in_fd, offset, count);
-}
-
-asmlinkage long compat_sys_sendfile64_wrapper(u32 out_fd, u32 in_fd,
- compat_loff_t __user *offset, u32 count)
-{
- return sys_sendfile((int)out_fd, (int)in_fd,
- (off_t __user *)offset, count);
-}
-
-/* Note: it is necessary to treat option as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
-{
- return sys_prctl((int)option,
- (unsigned long) arg2,
- (unsigned long) arg3,
- (unsigned long) arg4,
- (unsigned long) arg5);
-}
-
-/* Note: it is necessary to treat pid as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_sched_rr_get_interval_wrapper(u32 pid,
- struct compat_timespec __user *interval)
-{
- return compat_sys_sched_rr_get_interval((int)pid, interval);
-}
-
-/* Note: it is necessary to treat mode as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_access(const char __user * filename, u32 mode)
-{
- return sys_access(filename, (int)mode);
-}
-
-
-/* Note: it is necessary to treat mode as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_creat(const char __user * pathname, u32 mode)
-{
- return sys_creat(pathname, (int)mode);
-}
-
-
-/* Note: it is necessary to treat pid and options as unsigned ints,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options)
-{
- return sys_waitpid((int)pid, stat_addr, (int)options);
-}
-
-
-/* Note: it is necessary to treat gidsetsize as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_getgroups(u32 gidsetsize, gid_t __user *grouplist)
-{
- return sys_getgroups((int)gidsetsize, grouplist);
-}
-
-
-/* Note: it is necessary to treat pid as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_getpgid(u32 pid)
-{
- return sys_getpgid((int)pid);
-}
-
-
-
-/* Note: it is necessary to treat pid as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_getsid(u32 pid)
-{
- return sys_getsid((int)pid);
-}
-
-
-/* Note: it is necessary to treat pid and sig as unsigned ints,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_kill(u32 pid, u32 sig)
-{
- return sys_kill((int)pid, (int)sig);
-}
-
-
-/* Note: it is necessary to treat mode as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_mkdir(const char __user * pathname, u32 mode)
-{
- return sys_mkdir(pathname, (int)mode);
-}
-
-long compat_sys_nice(u32 increment)
-{
- /* sign extend increment */
- return sys_nice((int)increment);
-}
-
-off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
-{
- /* sign extend n */
- return sys_lseek(fd, (int)offset, origin);
-}
-
-long compat_sys_truncate(const char __user * path, u32 length)
-{
- /* sign extend length */
- return sys_truncate(path, (int)length);
-}
-
-long compat_sys_ftruncate(int fd, u32 length)
-{
- /* sign extend length */
- return sys_ftruncate(fd, (int)length);
-}
-
-/* Note: it is necessary to treat bufsiz as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_readlink(const char __user * path, char __user * buf, u32 bufsiz)
-{
- return sys_readlink(path, buf, (int)bufsiz);
-}
-
-/* Note: it is necessary to treat option as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_sched_get_priority_max(u32 policy)
-{
- return sys_sched_get_priority_max((int)policy);
-}
-
-
-/* Note: it is necessary to treat policy as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_sched_get_priority_min(u32 policy)
-{
- return sys_sched_get_priority_min((int)policy);
-}
-
-
-/* Note: it is necessary to treat pid as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_sched_getparam(u32 pid, struct sched_param __user *param)
-{
- return sys_sched_getparam((int)pid, param);
-}
-
-
-/* Note: it is necessary to treat pid as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_sched_getscheduler(u32 pid)
-{
- return sys_sched_getscheduler((int)pid);
-}
-
-
-/* Note: it is necessary to treat pid as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_sched_setparam(u32 pid, struct sched_param __user *param)
-{
- return sys_sched_setparam((int)pid, param);
-}
-
-
-/* Note: it is necessary to treat pid and policy as unsigned ints,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param)
-{
- return sys_sched_setscheduler((int)pid, (int)policy, param);
-}
-
-
-/* Note: it is necessary to treat len as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_setdomainname(char __user *name, u32 len)
-{
- return sys_setdomainname(name, (int)len);
-}
-
-
-/* Note: it is necessary to treat gidsetsize as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_setgroups(u32 gidsetsize, gid_t __user *grouplist)
-{
- return sys_setgroups((int)gidsetsize, grouplist);
-}
-
-
-asmlinkage long compat_sys_sethostname(char __user *name, u32 len)
-{
- /* sign extend len */
- return sys_sethostname(name, (int)len);
-}
-
-
-/* Note: it is necessary to treat pid and pgid as unsigned ints,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_setpgid(u32 pid, u32 pgid)
-{
- return sys_setpgid((int)pid, (int)pgid);
-}
-
-long compat_sys_getpriority(u32 which, u32 who)
-{
- /* sign extend which and who */
- return sys_getpriority((int)which, (int)who);
-}
-
-long compat_sys_setpriority(u32 which, u32 who, u32 niceval)
-{
- /* sign extend which, who and niceval */
- return sys_setpriority((int)which, (int)who, (int)niceval);
-}
-
-long compat_sys_ioprio_get(u32 which, u32 who)
-{
- /* sign extend which and who */
- return sys_ioprio_get((int)which, (int)who);
-}
-
-long compat_sys_ioprio_set(u32 which, u32 who, u32 ioprio)
-{
- /* sign extend which, who and ioprio */
- return sys_ioprio_set((int)which, (int)who, (int)ioprio);
-}
-
-/* Note: it is necessary to treat newmask as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_ssetmask(u32 newmask)
-{
- return sys_ssetmask((int) newmask);
-}
-
-asmlinkage long compat_sys_syslog(u32 type, char __user * buf, u32 len)
-{
- /* sign extend len */
- return sys_syslog(type, buf, (int)len);
-}
-
-
-/* Note: it is necessary to treat mask as an unsigned int,
- * with the corresponding cast to a signed int to insure that the
- * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
- * and the register representation of a signed int (msr in 64-bit mode) is performed.
- */
-asmlinkage long compat_sys_umask(u32 mask)
-{
- return sys_umask((int)mask);
-}
-
unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
@@ -467,12 +69,6 @@ unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
}
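The sign-extension boilerplate that dominates the removals above exists because a 32-bit task only defines the low 32 bits of an argument register; the wrappers therefore took the value as u32 and the (int) cast sign-extended it before calling the native 64-bit syscall. A hedged illustration of that behaviour; the function name is hypothetical:

/* Illustration only: how the (int) casts in the removed wrappers behave. */
static long sign_extend_arg(u32 arg)
{
	/* A 32-bit caller passing -1 presents 0xffffffff in the low word.
	 * Casting through int recovers -1; returning it as long then
	 * sign-extends, which is what the native 64-bit syscall expects. */
	return (int)arg;
}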
-long compat_sys_tgkill(u32 tgid, u32 pid, int sig)
-{
- /* sign extend tgid, pid */
- return sys_tgkill((int)tgid, (int)pid, sig);
-}
-
/*
* long long munging:
* The 32 bit ABI passes long longs in an odd even register pair.
@@ -514,13 +110,6 @@ asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long h
return sys_ftruncate(fd, (high << 32) | low);
}
-long ppc32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
- size_t len)
-{
- return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
- buf, len);
-}
-
long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
size_t len, int advice)
{
@@ -528,23 +117,6 @@ long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
advice);
}
-asmlinkage long compat_sys_add_key(const char __user *_type,
- const char __user *_description,
- const void __user *_payload,
- u32 plen,
- u32 ringid)
-{
- return sys_add_key(_type, _description, _payload, plen, ringid);
-}
-
-asmlinkage long compat_sys_request_key(const char __user *_type,
- const char __user *_description,
- const char __user *_callout_info,
- u32 destringid)
-{
- return sys_request_key(_type, _description, _callout_info, destringid);
-}
-
asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
unsigned offset_hi, unsigned offset_lo,
unsigned nbytes_hi, unsigned nbytes_lo)
@@ -554,11 +126,3 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
return sys_sync_file_range(fd, offset, nbytes, flags);
}
-
-asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
- unsigned mask_hi, unsigned mask_lo,
- int dfd, const char __user *pathname)
-{
- u64 mask = ((u64)mask_hi << 32) | mask_lo;
- return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
-}
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 0591d2a6..294b1c4e 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -180,7 +180,7 @@ SYSFS_PMCSETUP(dscr, SPRN_DSCR);
SYSFS_PMCSETUP(pir, SPRN_PIR);
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
-static DEVICE_ATTR(spurr, 0600, show_spurr, NULL);
+static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 9a9112f..58a41cf5 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -143,7 +143,7 @@ EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
* Factors for converting from cputime_t (timebase ticks) to
* jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
@@ -347,6 +347,7 @@ void vtime_account_system(struct task_struct *tsk)
if (stolen)
account_steal_time(stolen);
}
+EXPORT_SYMBOL_GPL(vtime_account_system);
void vtime_account_idle(struct task_struct *tsk)
{
@@ -377,7 +378,7 @@ void vtime_account_user(struct task_struct *tsk)
account_user_time(tsk, utime, utimescaled);
}
-#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
+#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif
@@ -679,7 +680,7 @@ int update_persistent_clock(struct timespec now)
struct rtc_time tm;
if (!ppc_md.set_rtc_time)
- return 0;
+ return -ENODEV;
to_tm(now.tv_sec + 1 + timezone_offset, &tm);
tm.tm_year -= 1900;
@@ -1059,10 +1060,8 @@ static int __init rtc_init(void)
return -ENODEV;
pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
- return 0;
+ return PTR_RET(pdev);
}
module_init(rtc_init);
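PTR_RET() folds the removed IS_ERR()/PTR_ERR() sequence into one call; roughly, a sketch of the helper from include/linux/err.h:

static inline int ptr_ret_sketch(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}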
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
new file mode 100644
index 0000000..2da67e7
--- /dev/null
+++ b/arch/powerpc/kernel/tm.S
@@ -0,0 +1,390 @@
+/*
+ * Transactional memory support routines to reclaim and recheckpoint
+ * transactional process state.
+ *
+ * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/ppc_asm.h>
+#include <asm/ppc-opcode.h>
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+
+#ifdef CONFIG_VSX
+/* See fpu.S, this is very similar but to save/restore checkpointed FPRs/VSRs */
+#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \
+BEGIN_FTR_SECTION \
+ b 2f; \
+END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
+ SAVE_32FPRS_TRANSACT(n,base); \
+ b 3f; \
+2: SAVE_32VSRS_TRANSACT(n,c,base); \
+3:
+/* ...and this is just plain borrowed from there. */
+#define __REST_32FPRS_VSRS(n,c,base) \
+BEGIN_FTR_SECTION \
+ b 2f; \
+END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
+ REST_32FPRS(n,base); \
+ b 3f; \
+2: REST_32VSRS(n,c,base); \
+3:
+#else
+#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) SAVE_32FPRS_TRANSACT(n, base)
+#define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base)
+#endif
+#define SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \
+ __SAVE_32FPRS_VSRS_TRANSACT(n,__REG_##c,__REG_##base)
+#define REST_32FPRS_VSRS(n,c,base) \
+ __REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)
+
+/* Stack frame offsets for local variables. */
+#define TM_FRAME_L0 TM_FRAME_SIZE-16
+#define TM_FRAME_L1 TM_FRAME_SIZE-8
+#define STACK_PARAM(x) (48+((x)*8))
+
+
+/* In order to access the TM SPRs, TM must be enabled. So, do so: */
+_GLOBAL(tm_enable)
+ mfmsr r4
+ li r3, MSR_TM >> 32
+ sldi r3, r3, 32
+ and. r0, r4, r3
+ bne 1f
+ or r4, r4, r3
+ mtmsrd r4
+1: blr
+
+_GLOBAL(tm_save_sprs)
+ mfspr r0, SPRN_TFHAR
+ std r0, THREAD_TM_TFHAR(r3)
+ mfspr r0, SPRN_TEXASR
+ std r0, THREAD_TM_TEXASR(r3)
+ mfspr r0, SPRN_TFIAR
+ std r0, THREAD_TM_TFIAR(r3)
+ blr
+
+_GLOBAL(tm_restore_sprs)
+ ld r0, THREAD_TM_TFHAR(r3)
+ mtspr SPRN_TFHAR, r0
+ ld r0, THREAD_TM_TEXASR(r3)
+ mtspr SPRN_TEXASR, r0
+ ld r0, THREAD_TM_TFIAR(r3)
+ mtspr SPRN_TFIAR, r0
+ blr
+
+ /* Passed an 8-bit failure cause as first argument. */
+_GLOBAL(tm_abort)
+ TABORT(R3)
+ blr
+
+
+/* void tm_reclaim(struct thread_struct *thread,
+ * unsigned long orig_msr,
+ * uint8_t cause)
+ *
+ * - Performs a full reclaim. This destroys outstanding
+ * transactions and updates thread->regs.tm_ckpt_* with the
+ * original checkpointed state. Note that thread->regs is
+ * unchanged.
+ * - FP regs are written back to thread->transact_fpr before
+ * reclaiming. These are the transactional (current) versions.
+ *
+ * Purpose is to both abort transactions of, and preserve the state of,
+ * a transaction at a context switch. We preserve/restore both sets of process
+ * state to restore them when the thread's scheduled again. We continue in
+ * userland as though nothing happened, but when the transaction is resumed
+ * they will abort back to the checkpointed state we save out here.
+ *
+ * Call with IRQs off, stacks get all out of sync for some periods in here!
+ */
+_GLOBAL(tm_reclaim)
+ mfcr r6
+ mflr r0
+ std r6, 8(r1)
+ std r0, 16(r1)
+ std r2, 40(r1)
+ stdu r1, -TM_FRAME_SIZE(r1)
+
+ /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */
+
+ std r3, STACK_PARAM(0)(r1)
+ SAVE_NVGPRS(r1)
+
+ mfmsr r14
+ mr r15, r14
+ ori r15, r15, MSR_FP
+ oris r15, r15, MSR_VEC@h
+#ifdef CONFIG_VSX
+ BEGIN_FTR_SECTION
+ oris r15,r15, MSR_VSX@h
+ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ mtmsrd r15
+ std r14, TM_FRAME_L0(r1)
+
+ /* Stash the stack pointer away for use after reclaim */
+ std r1, PACAR1(r13)
+
+ /* ******************** FPR/VR/VSRs ************
+ * Before reclaiming, capture the current/transactional FPR/VR
+ * versions /if used/.
+ *
+ * (If VSX is used, FP and VMX are implied; we don't need to look at
+ * MSR.VSX, since copying FP regs if .FP and vector regs if .VMX covers it.)
+ *
+ * We're passed the thread's MSR as parameter 2.
+ *
+ * We enabled VEC/FP/VSX in the msr above, so we can execute these
+ * instructions!
+ */
+ andis. r0, r4, MSR_VEC@h
+ beq dont_backup_vec
+
+ SAVE_32VRS_TRANSACT(0, r6, r3) /* r6 scratch, r3 thread */
+ mfvscr vr0
+ li r6, THREAD_TRANSACT_VSCR
+ stvx vr0, r3, r6
+ mfspr r0, SPRN_VRSAVE
+ std r0, THREAD_TRANSACT_VRSAVE(r3)
+
+dont_backup_vec:
+ andi. r0, r4, MSR_FP
+ beq dont_backup_fp
+
+ SAVE_32FPRS_VSRS_TRANSACT(0, R6, R3) /* r6 scratch, r3 thread */
+
+ mffs fr0
+ stfd fr0,THREAD_TRANSACT_FPSCR(r3)
+
+dont_backup_fp:
+ /* The moment we treclaim, ALL of our GPRs will switch
+ * to user register state. (FPRs, CCR etc. also!)
+ * Use an sprg and a tm_scratch in the PACA to shuffle.
+ */
+ TRECLAIM(R5) /* Cause in r5 */
+
+ /* ******************** GPRs ******************** */
+ /* Stash the checkpointed r13 away in the scratch SPR and get the real
+ * paca
+ */
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
+
+ /* Stash the checkpointed r1 away in paca tm_scratch and get the real
+ * stack pointer back
+ */
+ std r1, PACATMSCRATCH(r13)
+ ld r1, PACAR1(r13)
+
+ /* Now get some more GPRS free */
+ std r7, GPR7(r1) /* Temporary stash */
+ std r12, GPR12(r1) /* '' '' '' */
+ ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */
+
+ addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */
+
+ /* Make r7 look like an exception frame so that we
+ * can use the neat GPRx(n) macros. r7 is NOT a pt_regs ptr!
+ */
+ subi r7, r7, STACK_FRAME_OVERHEAD
+
+ /* Sync the userland GPRs 2-12, 14-31 to thread->regs: */
+ SAVE_GPR(0, r7) /* user r0 */
+ SAVE_GPR(2, r7) /* user r2 */
+ SAVE_4GPRS(3, r7) /* user r3-r6 */
+ SAVE_4GPRS(8, r7) /* user r8-r11 */
+ ld r3, PACATMSCRATCH(r13) /* user r1 */
+ ld r4, GPR7(r1) /* user r7 */
+ ld r5, GPR12(r1) /* user r12 */
+ GET_SCRATCH0(6) /* user r13 */
+ std r3, GPR1(r7)
+ std r4, GPR7(r7)
+ std r5, GPR12(r7)
+ std r6, GPR13(r7)
+
+ SAVE_NVGPRS(r7) /* user r14-r31 */
+
+ /* ******************** NIP ******************** */
+ mfspr r3, SPRN_TFHAR
+ std r3, _NIP(r7) /* Returns to failhandler */
+ /* The checkpointed NIP is ignored when rescheduling/recheckpointing,
+ * but is used in signal return to 'wind back' to the abort handler.
+ */
+
+ /* ******************** CR,LR,CCR,MSR ********** */
+ mfctr r3
+ mflr r4
+ mfcr r5
+ mfxer r6
+
+ std r3, _CTR(r7)
+ std r4, _LINK(r7)
+ std r5, _CCR(r7)
+ std r6, _XER(r7)
+
+ /* MSR and flags: We don't change CRs, and we don't need to alter
+ * MSR.
+ */
+
+ /* TM regs, incl TEXASR -- these live in thread_struct. Note they've
+ * been updated by the treclaim, to explain to userland the failure
+ * cause (aborted).
+ */
+ mfspr r0, SPRN_TEXASR
+ mfspr r3, SPRN_TFHAR
+ mfspr r4, SPRN_TFIAR
+ std r0, THREAD_TM_TEXASR(r12)
+ std r3, THREAD_TM_TFHAR(r12)
+ std r4, THREAD_TM_TFIAR(r12)
+
+ /* AMR and PPR are checkpointed too, but are unsupported by Linux. */
+
+ /* Restore original MSR/IRQ state & clear TM mode */
+ ld r14, TM_FRAME_L0(r1) /* Orig MSR */
+ li r15, 0
+ rldimi r14, r15, MSR_TS_LG, (63-MSR_TS_LG)-1
+ mtmsrd r14
+
+ REST_NVGPRS(r1)
+
+ addi r1, r1, TM_FRAME_SIZE
+ ld r4, 8(r1)
+ ld r0, 16(r1)
+ mtcr r4
+ mtlr r0
+ ld r2, 40(r1)
+ blr
+
+
+ /* void tm_recheckpoint(struct thread_struct *thread,
+ * unsigned long orig_msr)
+ * - Restore the checkpointed register state saved by tm_reclaim
+ * when we switch_to a process.
+ *
+ * Call with IRQs off, stacks get all out of sync for
+ * some periods in here!
+ */
+_GLOBAL(tm_recheckpoint)
+ mfcr r5
+ mflr r0
+ std r5, 8(r1)
+ std r0, 16(r1)
+ std r2, 40(r1)
+ stdu r1, -TM_FRAME_SIZE(r1)
+
+ /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD].
+ * This is used for backing up the NVGPRs:
+ */
+ SAVE_NVGPRS(r1)
+
+ std r1, PACAR1(r13)
+
+ /* Load complete register state from ts_ckpt* registers */
+
+ addi r7, r3, PT_CKPT_REGS /* Thread's ckpt_regs */
+
+ /* Make r7 look like an exception frame so that we
+ * can use the neat GPRx(n) macros. r7 is now NOT a pt_regs ptr!
+ */
+ subi r7, r7, STACK_FRAME_OVERHEAD
+
+ SET_SCRATCH0(r1)
+
+ mfmsr r6
+ /* R4 = original MSR to indicate whether thread used FP/Vector etc. */
+
+ /* Enable FP/vec in MSR if necessary! */
+ lis r5, MSR_VEC@h
+ ori r5, r5, MSR_FP
+ and. r5, r4, r5
+ beq restore_gprs /* if neither, skip both */
+
+#ifdef CONFIG_VSX
+ BEGIN_FTR_SECTION
+ oris r5, r5, MSR_VSX@h
+ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */
+ mtmsr r5
+
+#ifdef CONFIG_ALTIVEC
+ /* FP and VEC registers: These are recheckpointed from thread.fpr[]
+ * and thread.vr[] respectively. The thread.transact_fpr[] version
+ * is more modern, and will be loaded subsequently by any FPUnavailable
+ * trap.
+ */
+ andis. r0, r4, MSR_VEC@h
+ beq dont_restore_vec
+
+ li r5, THREAD_VSCR
+ lvx vr0, r3, r5
+ mtvscr vr0
+ REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
+ ld r5, THREAD_VRSAVE(r3)
+ mtspr SPRN_VRSAVE, r5
+#endif
+
+dont_restore_vec:
+ andi. r0, r4, MSR_FP
+ beq dont_restore_fp
+
+ lfd fr0, THREAD_FPSCR(r3)
+ MTFSF_L(fr0)
+ REST_32FPRS_VSRS(0, R4, R3)
+
+dont_restore_fp:
+ mtmsr r6 /* FP/Vec off again! */
+
+restore_gprs:
+ /* ******************** CR,LR,CCR,MSR ********** */
+ ld r3, _CTR(r7)
+ ld r4, _LINK(r7)
+ ld r5, _CCR(r7)
+ ld r6, _XER(r7)
+
+ mtctr r3
+ mtlr r4
+ mtcr r5
+ mtxer r6
+
+ /* MSR and flags: We don't change CRs, and we don't need to alter
+ * MSR.
+ */
+
+ REST_4GPRS(0, r7) /* GPR0-3 */
+ REST_GPR(4, r7) /* GPR4-6 */
+ REST_GPR(5, r7)
+ REST_GPR(6, r7)
+ REST_4GPRS(8, r7) /* GPR8-11 */
+ REST_2GPRS(12, r7) /* GPR12-13 */
+
+ REST_NVGPRS(r7) /* GPR14-31 */
+
+ ld r7, GPR7(r7) /* GPR7 */
+
+ /* Commit register state as checkpointed state: */
+ TRECHKPT
+
+ /* Our transactional state has now changed.
+ *
+ * Now just get out of here. Transactional (current) state will be
+ * updated once restore is called on the return path in the _switch-ed
+ * -to process.
+ */
+
+ GET_PACA(r13)
+ GET_SCRATCH0(r1)
+
+ REST_NVGPRS(r1)
+
+ addi r1, r1, TM_FRAME_SIZE
+ ld r4, 8(r1)
+ ld r0, 16(r1)
+ mtcr r4
+ mtlr r0
+ ld r2, 40(r1)
+ blr
+
+ /* ****************************************************************** */
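
tm.S above only provides the reclaim and recheckpoint primitives; the pairing is driven from C at context-switch time. Below is a stand-alone sketch of that ordering. The struct, the switch_out()/switch_in() split, and the zero cause code are illustrative stand-ins, and the stubs merely trace the calls; only the call order mirrors the comments in tm_reclaim()/tm_recheckpoint().

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the bits of thread state the pairing cares about. */
struct thread_state {
	uint64_t msr;		/* MSR at the point the thread was interrupted */
	int	 transactional;	/* MSR[TS] said a transaction was active */
};

/* Stubs tracing the assembly entry points defined above. */
static void tm_reclaim(struct thread_state *t, uint64_t orig_msr, uint8_t cause)
{
	(void)t; (void)orig_msr;
	printf("reclaim: stash transactional + checkpointed state, cause=%d\n", cause);
}

static void tm_recheckpoint(struct thread_state *t, uint64_t orig_msr)
{
	(void)t; (void)orig_msr;
	printf("recheckpoint: reload checkpointed state before resuming\n");
}

static void switch_out(struct thread_state *prev)
{
	if (prev->transactional)
		tm_reclaim(prev, prev->msr, 0 /* placeholder cause code */);
}

static void switch_in(struct thread_state *next)
{
	if (next->transactional)
		tm_recheckpoint(next, next->msr);
}

int main(void)
{
	struct thread_state t = { .msr = 0, .transactional = 1 };

	switch_out(&t);		/* abort + preserve on the way out... */
	switch_in(&t);		/* ...recheckpoint on the way back in */
	return 0;
}
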
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a59a2ba..32a0f79 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -35,6 +35,7 @@
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
@@ -52,12 +53,14 @@
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
+#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
+#include <asm/tm.h>
#include <asm/debug.h>
#include <sysdev/fsl_pci.h>
@@ -67,7 +70,7 @@ int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
-int (*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly;
+int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
EXPORT_SYMBOL(__debugger);
@@ -75,10 +78,17 @@ EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
-EXPORT_SYMBOL(__debugger_dabr_match);
+EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif
+/* Transactional Memory trap debug */
+#ifdef TM_DEBUG_SW
+#define TM_DEBUG(x...) printk(KERN_INFO x)
+#else
+#define TM_DEBUG(x...) do { } while(0)
+#endif
+
/*
* Trap & Exception support
*/
@@ -139,7 +149,7 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
{
bust_spinlocks(0);
die_owner = -1;
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
die_nest_count--;
oops_exit();
printk("\n");
@@ -351,6 +361,7 @@ static inline int check_io_access(struct pt_regs *regs)
exception is in the MSR. */
#define get_reason(regs) ((regs)->msr)
#define get_mc_reason(regs) ((regs)->msr)
+#define REASON_TM 0x200000
#define REASON_FP 0x100000
#define REASON_ILLEGAL 0x80000
#define REASON_PRIVILEGED 0x40000
@@ -661,6 +672,7 @@ int machine_check_generic(struct pt_regs *regs)
void machine_check_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
int recover = 0;
__get_cpu_var(irq_stat).mce_exceptions++;
@@ -677,7 +689,7 @@ void machine_check_exception(struct pt_regs *regs)
recover = cur_cpu_spec->machine_check(regs);
if (recover > 0)
- return;
+ goto bail;
#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
/* the qspan pci read routines can cause machine checks -- Cort
@@ -687,20 +699,23 @@ void machine_check_exception(struct pt_regs *regs)
* -- BenH
*/
bad_page_fault(regs, regs->dar, SIGBUS);
- return;
+ goto bail;
#endif
if (debugger_fault_handler(regs))
- return;
+ goto bail;
if (check_io_access(regs))
- return;
+ goto bail;
die("Machine check", regs, SIGBUS);
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
panic("Unrecoverable Machine check");
+
+bail:
+ exception_exit(prev_state);
}
void SMIException(struct pt_regs *regs)
@@ -710,20 +725,29 @@ void SMIException(struct pt_regs *regs)
void unknown_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs, 0, 0);
+
+ exception_exit(prev_state);
}
void instruction_breakpoint_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
- return;
+ goto bail;
if (debugger_iabr_match(regs))
- return;
+ goto bail;
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+
+bail:
+ exception_exit(prev_state);
}
void RunModeException(struct pt_regs *regs)
@@ -733,15 +757,20 @@ void RunModeException(struct pt_regs *regs)
void __kprobes single_step_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
clear_single_step(regs);
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
- return;
+ goto bail;
if (debugger_sstep(regs))
- return;
+ goto bail;
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
+
+bail:
+ exception_exit(prev_state);
}
/*
@@ -911,6 +940,28 @@ static int emulate_isel(struct pt_regs *regs, u32 instword)
return 0;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline bool tm_abort_check(struct pt_regs *regs, int cause)
+{
+ /* If we're emulating a load/store in an active transaction, we cannot
+ * emulate it as the kernel operates in transaction suspended context.
+ * We need to abort the transaction. This creates a persistent TM
+ * abort so tell the user what caused it with a new code.
+ */
+ if (MSR_TM_TRANSACTIONAL(regs->msr)) {
+ tm_enable();
+ tm_abort(cause);
+ return true;
+ }
+ return false;
+}
+#else
+static inline bool tm_abort_check(struct pt_regs *regs, int reason)
+{
+ return false;
+}
+#endif
+
static int emulate_instruction(struct pt_regs *regs)
{
u32 instword;
@@ -950,6 +1001,9 @@ static int emulate_instruction(struct pt_regs *regs)
/* Emulate load/store string insn. */
if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
+ if (tm_abort_check(regs,
+ TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
+ return -EINVAL;
PPC_WARN_EMULATED(string, regs);
return emulate_string_inst(regs, instword);
}
@@ -968,7 +1022,10 @@ static int emulate_instruction(struct pt_regs *regs)
#ifdef CONFIG_PPC64
/* Emulate the mfspr rD, DSCR. */
- if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
+ if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
+ PPC_INST_MFSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
+ PPC_INST_MFSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mfdscr, regs);
rd = (instword >> 21) & 0x1f;
@@ -976,7 +1033,10 @@ static int emulate_instruction(struct pt_regs *regs)
return 0;
}
/* Emulate the mtspr DSCR, rD. */
- if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
+ if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
+ PPC_INST_MTSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
+ PPC_INST_MTSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
@@ -997,6 +1057,7 @@ int is_valid_bugaddr(unsigned long addr)
void __kprobes program_check_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
unsigned int reason = get_reason(regs);
extern int do_mathemu(struct pt_regs *regs);
@@ -1006,27 +1067,59 @@ void __kprobes program_check_exception(struct pt_regs *regs)
if (reason & REASON_FP) {
/* IEEE FP exception */
parse_fpe(regs);
- return;
+ goto bail;
}
if (reason & REASON_TRAP) {
/* Debugger is first in line to stop recursive faults in
* rcu_lock, notify_die, or atomic_notifier_call_chain */
if (debugger_bpt(regs))
- return;
+ goto bail;
/* trap exception */
if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
== NOTIFY_STOP)
- return;
+ goto bail;
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
regs->nip += 4;
- return;
+ goto bail;
}
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
- return;
+ goto bail;
+ }
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (reason & REASON_TM) {
+ /* This is a TM "Bad Thing Exception" program check.
+ * This occurs when:
+ * - An rfid/hrfid/mtmsrd attempts to cause an illegal
+ * transition in TM states.
+ * - A trechkpt is attempted when transactional.
+ * - A treclaim is attempted when non transactional.
+ * - A tend is illegally attempted.
+ * - writing a TM SPR when transactional.
+ */
+ if (!user_mode(regs) &&
+ report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
+ regs->nip += 4;
+ goto bail;
+ }
+ /* If usermode caused this, it's done something illegal and
+ * gets a SIGILL slap on the wrist. We call it an illegal
+ * operand to distinguish from the instruction just being bad
+ * (e.g. executing a 'tend' on a CPU without TM!); it's an
+ * illegal /placement/ of a valid instruction.
+ */
+ if (user_mode(regs)) {
+ _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
+ goto bail;
+ } else {
+ printk(KERN_EMERG "Unexpected TM Bad Thing exception "
+ "at %lx (msr 0x%x)\n", regs->nip, reason);
+ die("Unrecoverable exception", regs, SIGABRT);
+ }
}
+#endif
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
@@ -1043,16 +1136,16 @@ void __kprobes program_check_exception(struct pt_regs *regs)
switch (do_mathemu(regs)) {
case 0:
emulate_single_step(regs);
- return;
+ goto bail;
case 1: {
int code = 0;
code = __parse_fpscr(current->thread.fpscr.val);
_exception(SIGFPE, regs, code, regs->nip);
- return;
+ goto bail;
}
case -EFAULT:
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
- return;
+ goto bail;
}
/* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */
@@ -1063,10 +1156,10 @@ void __kprobes program_check_exception(struct pt_regs *regs)
case 0:
regs->nip += 4;
emulate_single_step(regs);
- return;
+ goto bail;
case -EFAULT:
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
- return;
+ goto bail;
}
}
@@ -1074,16 +1167,33 @@ void __kprobes program_check_exception(struct pt_regs *regs)
_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
else
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+
+bail:
+ exception_exit(prev_state);
+}
+
+/*
+ * This occurs when running in hypervisor mode on POWER6 or later
+ * and an illegal instruction is encountered.
+ */
+void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
+{
+ regs->msr |= REASON_ILLEGAL;
+ program_check_exception(regs);
}
void alignment_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
int sig, code, fixed = 0;
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
+ if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
+ goto bail;
+
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
@@ -1091,7 +1201,7 @@ void alignment_exception(struct pt_regs *regs)
if (fixed == 1) {
regs->nip += 4; /* skip over emulated instruction */
emulate_single_step(regs);
- return;
+ goto bail;
}
/* Operand address was bad */
@@ -1106,6 +1216,9 @@ void alignment_exception(struct pt_regs *regs)
_exception(sig, regs, code, regs->dar);
else
bad_page_fault(regs, regs->dar, sig);
+
+bail:
+ exception_exit(prev_state);
}
void StackOverflow(struct pt_regs *regs)
@@ -1134,23 +1247,32 @@ void trace_syscall(struct pt_regs *regs)
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
+
+ exception_exit(prev_state);
}
void altivec_unavailable_exception(struct pt_regs *regs)
{
+ enum ctx_state prev_state = exception_enter();
+
if (user_mode(regs)) {
/* A user program has executed an altivec instruction,
but this kernel doesn't support altivec. */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
- return;
+ goto bail;
}
printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
+
+bail:
+ exception_exit(prev_state);
}
void vsx_unavailable_exception(struct pt_regs *regs)
@@ -1167,6 +1289,109 @@ void vsx_unavailable_exception(struct pt_regs *regs)
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
+void tm_unavailable_exception(struct pt_regs *regs)
+{
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
+ /* Currently we never expect a TMU exception. Catch
+ * this and kill the process!
+ */
+ printk(KERN_EMERG "Unexpected TM unavailable exception at %lx "
+ "(msr %lx)\n",
+ regs->nip, regs->msr);
+
+ if (user_mode(regs)) {
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+ return;
+ }
+
+ die("Unexpected TM unavailable exception", regs, SIGABRT);
+}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+
+extern void do_load_up_fpu(struct pt_regs *regs);
+
+void fp_unavailable_tm(struct pt_regs *regs)
+{
+ /* Note: This does not handle any kind of FP laziness. */
+
+ TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
+ regs->nip, regs->msr);
+ tm_enable();
+
+ /* We can only have got here if the task started using FP after
+ * beginning the transaction. So, the transactional regs are just a
+ * copy of the checkpointed ones. But, we still need to recheckpoint
+ * as we're enabling FP for the process; it will return, abort the
+ * transaction, and probably retry but now with FP enabled. So the
+ * checkpointed FP registers need to be loaded.
+ */
+ tm_reclaim(&current->thread, current->thread.regs->msr,
+ TM_CAUSE_FAC_UNAV);
+ /* Reclaim didn't save out any FPRs to transact_fprs. */
+
+ /* Enable FP for the task: */
+ regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+
+ /* This loads and recheckpoints the FP registers from
+ * thread.fpr[]. They will remain in registers after the
+ * checkpoint so we don't need to reload them after.
+ */
+ tm_recheckpoint(&current->thread, regs->msr);
+}
+
+#ifdef CONFIG_ALTIVEC
+extern void do_load_up_altivec(struct pt_regs *regs);
+
+void altivec_unavailable_tm(struct pt_regs *regs)
+{
+ /* See the comments in fp_unavailable_tm(). This function operates
+ * the same way.
+ */
+
+ TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
+ "MSR=%lx\n",
+ regs->nip, regs->msr);
+ tm_enable();
+ tm_reclaim(&current->thread, current->thread.regs->msr,
+ TM_CAUSE_FAC_UNAV);
+ regs->msr |= MSR_VEC;
+ tm_recheckpoint(&current->thread, regs->msr);
+ current->thread.used_vr = 1;
+}
+#endif
+
+#ifdef CONFIG_VSX
+void vsx_unavailable_tm(struct pt_regs *regs)
+{
+ /* See the comments in fp_unavailable_tm(). This works similarly,
+ * though we're loading both FP and VEC registers in here.
+ *
+ * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC
+ * regs. Either way, set MSR_VSX.
+ */
+
+ TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
+ "MSR=%lx\n",
+ regs->nip, regs->msr);
+
+ tm_enable();
+ /* This reclaims FP and/or VR regs if they're already enabled */
+ tm_reclaim(&current->thread, current->thread.regs->msr,
+ TM_CAUSE_FAC_UNAV);
+
+ regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
+ MSR_VSX;
+ /* This loads & recheckpoints FP and VRs. */
+ tm_recheckpoint(&current->thread, regs->msr);
+ current->thread.used_vsr = 1;
+}
+#endif
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
void performance_monitor_exception(struct pt_regs *regs)
{
__get_cpu_var(irq_stat).pmu_irqs++;
@@ -1522,7 +1747,7 @@ void unrecoverable_exception(struct pt_regs *regs)
die("Unrecoverable exception", regs, SIGABRT);
}
-#ifdef CONFIG_BOOKE_WDT
+#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
* Default handler for a Watchdog exception,
* spins until a reboot occurs
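
Most of the traps.c churn above is mechanical: each handler now brackets its body with exception_enter()/exception_exit() for context tracking, so every early return becomes a goto bail. A stand-alone model of that shape (the state tracking here is a toy, not the context_tracking implementation):

#include <stdio.h>

enum ctx_state { CTX_KERNEL, CTX_USER };

static enum ctx_state cur_state = CTX_USER;

static enum ctx_state exception_enter(void)
{
	enum ctx_state prev = cur_state;

	cur_state = CTX_KERNEL;		/* tell context tracking we left user mode */
	return prev;
}

static void exception_exit(enum ctx_state prev)
{
	cur_state = prev;		/* restore whatever state was interrupted */
}

static int debugger_handled(void) { return 0; }

static void some_exception(void)
{
	enum ctx_state prev_state = exception_enter();

	if (debugger_handled())
		goto bail;		/* early "return" must still run the exit */

	printf("handle the exception proper\n");

bail:
	exception_exit(prev_state);
}

int main(void)
{
	some_exception();
	return 0;
}
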
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index f974849..9d3fdcd 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -64,6 +64,9 @@ void __init udbg_early_init(void)
udbg_init_usbgecko();
#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
udbg_init_wsp();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
+ /* In memory console */
+ udbg_init_memcons();
#elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC)
udbg_init_ehv_bc();
#elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC)
@@ -156,15 +159,13 @@ static struct console udbg_console = {
.index = 0,
};
-static int early_console_initialized;
-
/*
* Called by setup_system after ppc_md->probe and ppc_md->early_init.
* Call it again after setting udbg_putc in ppc_md->setup_arch.
*/
void __init register_early_udbg_console(void)
{
- if (early_console_initialized)
+ if (early_console)
return;
if (!udbg_putc)
@@ -174,7 +175,7 @@ void __init register_early_udbg_console(void)
printk(KERN_INFO "early console immortal !\n");
udbg_console.flags &= ~CON_BOOT;
}
- early_console_initialized = 1;
+ early_console = &udbg_console;
register_console(&udbg_console);
}
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index bc77834..59f419b 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -31,6 +31,16 @@
#define UPROBE_TRAP_NR UINT_MAX
/**
+ * is_trap_insn - check if the instruction is a trap variant
+ * @insn: instruction to be checked.
+ * Returns true if @insn is a trap variant.
+ */
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+ return (is_trap(*insn));
+}
+
+/**
* arch_uprobe_analyze_insn
* @mm: the probed address space.
* @arch_uprobe: the probepoint information.
@@ -43,12 +53,6 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
if (addr & 0x03)
return -EINVAL;
- /*
- * We currently don't support a uprobe on an already
- * existing breakpoint instruction underneath
- */
- if (is_trap(auprobe->ainsn))
- return -ENOTSUPP;
return 0;
}
@@ -188,3 +192,16 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
return false;
}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
+{
+ unsigned long orig_ret_vaddr;
+
+ orig_ret_vaddr = regs->link;
+
+ /* Replace the return addr with trampoline addr */
+ regs->link = trampoline_vaddr;
+
+ return orig_ret_vaddr;
+}
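
The new arch_uretprobe_hijack_return_addr() above simply swaps the link register for the trampoline address and hands the original back to the core uprobes layer. A toy model of that exchange (struct regs below is a stand-in for pt_regs, and the addresses are made up):

#include <stdio.h>

struct regs { unsigned long link; };	/* stand-in for pt_regs */

static unsigned long hijack_return_addr(unsigned long trampoline, struct regs *r)
{
	unsigned long orig = r->link;

	r->link = trampoline;	/* the probed function now "returns" into the trampoline */
	return orig;		/* kept by the caller so the real return can be restored */
}

int main(void)
{
	struct regs r = { .link = 0x10001234UL };
	unsigned long orig = hijack_return_addr(0x7fff0000UL, &r);

	printf("orig=%lx now=%lx\n", orig, r.link);
	return 0;
}
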
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 1b2076f..d4f463a 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -113,6 +113,10 @@ static struct vdso_patch_def vdso_patches[] = {
CPU_FTR_USE_TB, 0,
"__kernel_get_tbfreq", NULL
},
+ {
+ CPU_FTR_USE_TB, 0,
+ "__kernel_time", NULL
+ },
};
/*
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 4ee09ee..27e2f62 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -181,6 +181,32 @@ V_FUNCTION_END(__kernel_clock_getres)
/*
+ * Exact prototype of time()
+ *
+ * time_t time(time *t);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_time)
+ .cfi_startproc
+ mflr r12
+ .cfi_register lr,r12
+
+ mr r11,r3 /* r11 holds t */
+ bl __get_datapage@local
+ mr r9, r3 /* datapage ptr in r9 */
+
+ lwz r3,STAMP_XTIME+TSPEC_TV_SEC(r9)
+
+ cmplwi r11,0 /* check if t is NULL */
+ beq 2f
+ stw r3,0(r11) /* store result at *t */
+2: mtlr r12
+ crclr cr0*4+so
+ blr
+ .cfi_endproc
+V_FUNCTION_END(__kernel_time)
+
+/*
* This is the core of clock_gettime() and gettimeofday(),
* it returns the current time in r3 (seconds) and r4.
* On entry, r7 gives the resolution of r4, either USEC_PER_SEC
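
The __kernel_time entry added above serves time() entirely from the vDSO data page: load the cached seconds, store through the pointer if it is non-NULL, and return the value. A C-level equivalent of that fast path (the data-page layout below is an assumed stand-in, not the real vdso_data):

#include <stdio.h>
#include <time.h>

struct vdso_data_model {
	time_t stamp_xtime_sec;		/* kept up to date by the timekeeping code */
};

static struct vdso_data_model datapage = { .stamp_xtime_sec = 1383517036 };

static time_t vdso_time(time_t *t)
{
	time_t sec = datapage.stamp_xtime_sec;

	if (t)				/* mirrors the "cmplwi r11,0; beq 2f" NULL check */
		*t = sec;
	return sec;
}

int main(void)
{
	time_t out;

	printf("%ld %ld\n", (long)vdso_time(NULL), (long)vdso_time(&out));
	return 0;
}
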
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 43200ba..f223409 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -150,6 +150,7 @@ VERSION
#ifdef CONFIG_PPC64
__kernel_getcpu;
#endif
+ __kernel_time;
local: *;
};
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index e97a9a0..a76b4af 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -164,6 +164,32 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
.cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)
+/*
+ * Exact prototype of time()
+ *
+ * time_t time(time *t);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_time)
+ .cfi_startproc
+ mflr r12
+ .cfi_register lr,r12
+
+ mr r11,r3 /* r11 holds t */
+ bl V_LOCAL_FUNC(__get_datapage)
+
+ ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
+
+ cmpldi r11,0 /* check if t is NULL */
+ beq 2f
+ std r4,0(r11) /* store result at *t */
+2: mtlr r12
+ crclr cr0*4+so
+ mr r3,r4
+ blr
+ .cfi_endproc
+V_FUNCTION_END(__kernel_time)
+
/*
* This is the core of clock_gettime() and gettimeofday(),
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index e6c1758..e486381 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -147,6 +147,7 @@ VERSION
__kernel_sync_dicache_p5;
__kernel_sigtramp_rt64;
__kernel_getcpu;
+ __kernel_time;
local: *;
};
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index e830289..9e20999 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -7,6 +7,57 @@
#include <asm/page.h>
#include <asm/ptrace.h>
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Wrapper to call load_up_altivec from C.
+ * void do_load_up_altivec(struct pt_regs *regs);
+ */
+_GLOBAL(do_load_up_altivec)
+ mflr r0
+ std r0, 16(r1)
+ stdu r1, -112(r1)
+
+ subi r6, r3, STACK_FRAME_OVERHEAD
+ /* load_up_altivec expects r12=MSR, r13=PACA, and returns
+ * with r12 = new MSR.
+ */
+ ld r12,_MSR(r6)
+ GET_PACA(r13)
+ bl load_up_altivec
+ std r12,_MSR(r6)
+
+ ld r0, 112+16(r1)
+ addi r1, r1, 112
+ mtlr r0
+ blr
+
+/* void do_load_up_transact_altivec(struct thread_struct *thread)
+ *
+ * This is similar to load_up_altivec but for the transactional version of the
+ * vector regs. It doesn't mess with the task MSR or valid flags.
+ * Furthermore, VEC laziness is not supported with TM currently.
+ */
+_GLOBAL(do_load_up_transact_altivec)
+ mfmsr r6
+ oris r5,r6,MSR_VEC@h
+ MTMSRD(r5)
+ isync
+
+ li r4,1
+ stw r4,THREAD_USED_VR(r3)
+
+ li r10,THREAD_TRANSACT_VSCR
+ lvx vr0,r10,r3
+ mtvscr vr0
+ REST_32VRS_TRANSACT(0,r4,r3)
+
+ /* Disable VEC again. */
+ MTMSRD(r6)
+ isync
+
+ blr
+#endif
+
/*
* load_up_altivec(unused, unused, tsk)
* Disable VMX for the task which had it previously,
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 65d1c08..654e479 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -218,6 +218,11 @@ SECTIONS
.got : AT(ADDR(.got) - LOAD_OFFSET) {
__toc_start = .;
+#ifndef CONFIG_RELOCATABLE
+ __prom_init_toc_start = .;
+ arch/powerpc/kernel/prom_init.o*(.toc .got)
+ __prom_init_toc_end = .;
+#endif
*(.got)
*(.toc)
}
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 656e0bc..eb643f8 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -163,6 +163,14 @@ config KVM_MPIC
Currently, support is limited to certain versions of
Freescale's MPIC implementation.
+config KVM_XICS
+ bool "KVM in-kernel XICS emulation"
+ depends on KVM_BOOK3S_64 && !KVM_MPIC
+ ---help---
+ Include support for the XICS (eXternal Interrupt Controller
+ Specification) interrupt controller architecture used on
+ IBM POWER (pSeries) servers.
+
source drivers/vhost/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 4eada0c..422de3f 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -72,12 +72,18 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
book3s_hv.o \
book3s_hv_interrupts.o \
book3s_64_mmu_hv.o
+kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
+ book3s_hv_rm_xics.o
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
book3s_hv_rmhandlers.o \
book3s_hv_rm_mmu.o \
book3s_64_vio_hv.o \
book3s_hv_ras.o \
- book3s_hv_builtin.o
+ book3s_hv_builtin.o \
+ $(kvm-book3s_64-builtin-xics-objs-y)
+
+kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
+ book3s_xics.o
kvm-book3s_64-module-objs := \
../../../virt/kvm/kvm_main.o \
@@ -86,6 +92,7 @@ kvm-book3s_64-module-objs := \
emulate.o \
book3s.o \
book3s_64_vio.o \
+ book3s_rtas.o \
$(kvm-book3s_64-objs-y)
kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 128ed3a..700df6f 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -104,7 +104,7 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
return prio;
}
-static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
unsigned int vec)
{
unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -535,6 +535,15 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
&opcode, sizeof(u32));
break;
}
+#ifdef CONFIG_KVM_XICS
+ case KVM_REG_PPC_ICP_STATE:
+ if (!vcpu->arch.icp) {
+ r = -ENXIO;
+ break;
+ }
+ val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
+ break;
+#endif /* CONFIG_KVM_XICS */
default:
r = -EINVAL;
break;
@@ -597,6 +606,16 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
break;
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_KVM_XICS
+ case KVM_REG_PPC_ICP_STATE:
+ if (!vcpu->arch.icp) {
+ r = -ENXIO;
+ break;
+ }
+ r = kvmppc_xics_set_icp(vcpu,
+ set_reg_val(reg->id, val));
+ break;
+#endif /* CONFIG_KVM_XICS */
default:
r = -EINVAL;
break;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index ead58e3..3a9a1ac 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -143,7 +143,7 @@ map_again:
}
ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
- MMU_PAGE_4K, MMU_SEGSIZE_256M);
+ MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M);
if (ret < 0) {
/* If we couldn't map a primary PTE, try a secondary */
@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
vcpu3s->context_id[0] = err;
vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
- << USER_ESID_BITS) - 1;
- vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+ << ESID_BITS) - 1;
+ vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
kvmppc_mmu_hpte_init(vcpu);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8cc18ab..5880dfb 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -893,7 +893,10 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
/* Harvest R and C */
rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
- rev[i].guest_rpte = ptel | rcbits;
+ if (rcbits & ~rev[i].guest_rpte) {
+ rev[i].guest_rpte = ptel | rcbits;
+ note_hpte_modification(kvm, &rev[i]);
+ }
}
unlock_rmap(rmapp);
hptep[0] &= ~HPTE_V_HVLOCK;
@@ -976,7 +979,10 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
/* Now check and modify the HPTE */
if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
kvmppc_clear_ref_hpte(kvm, hptep, i);
- rev[i].guest_rpte |= HPTE_R_R;
+ if (!(rev[i].guest_rpte & HPTE_R_R)) {
+ rev[i].guest_rpte |= HPTE_R_R;
+ note_hpte_modification(kvm, &rev[i]);
+ }
ret = 1;
}
hptep[0] &= ~HPTE_V_HVLOCK;
@@ -1080,7 +1086,10 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
hptep[1] &= ~HPTE_R_C;
eieio();
hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
- rev[i].guest_rpte |= HPTE_R_C;
+ if (!(rev[i].guest_rpte & HPTE_R_C)) {
+ rev[i].guest_rpte |= HPTE_R_C;
+ note_hpte_modification(kvm, &rev[i]);
+ }
ret = 1;
}
hptep[0] &= ~HPTE_V_HVLOCK;
@@ -1090,11 +1099,30 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
return ret;
}
+static void harvest_vpa_dirty(struct kvmppc_vpa *vpa,
+ struct kvm_memory_slot *memslot,
+ unsigned long *map)
+{
+ unsigned long gfn;
+
+ if (!vpa->dirty || !vpa->pinned_addr)
+ return;
+ gfn = vpa->gpa >> PAGE_SHIFT;
+ if (gfn < memslot->base_gfn ||
+ gfn >= memslot->base_gfn + memslot->npages)
+ return;
+
+ vpa->dirty = false;
+ if (map)
+ __set_bit_le(gfn - memslot->base_gfn, map);
+}
+
long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long *map)
{
unsigned long i;
unsigned long *rmapp;
+ struct kvm_vcpu *vcpu;
preempt_disable();
rmapp = memslot->arch.rmap;
@@ -1103,6 +1131,15 @@ long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
__set_bit_le(i, map);
++rmapp;
}
+
+ /* Harvest dirty bits from VPA and DTL updates */
+ /* Note: we never modify the SLB shadow buffer areas */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map);
+ harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map);
+ spin_unlock(&vcpu->arch.vpa_update_lock);
+ }
preempt_enable();
return 0;
}
@@ -1114,7 +1151,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
unsigned long gfn = gpa >> PAGE_SHIFT;
struct page *page, *pages[1];
int npages;
- unsigned long hva, psize, offset;
+ unsigned long hva, offset;
unsigned long pa;
unsigned long *physp;
int srcu_idx;
@@ -1146,14 +1183,9 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
- psize = PAGE_SIZE;
- if (PageHuge(page)) {
- page = compound_head(page);
- psize <<= compound_order(page);
- }
- offset = gpa & (psize - 1);
+ offset = gpa & (PAGE_SIZE - 1);
if (nb_ret)
- *nb_ret = psize - offset;
+ *nb_ret = PAGE_SIZE - offset;
return page_address(page) + offset;
err:
@@ -1161,11 +1193,31 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
return NULL;
}
-void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
+void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
+ bool dirty)
{
struct page *page = virt_to_page(va);
+ struct kvm_memory_slot *memslot;
+ unsigned long gfn;
+ unsigned long *rmap;
+ int srcu_idx;
put_page(page);
+
+ if (!dirty || !kvm->arch.using_mmu_notifiers)
+ return;
+
+ /* We need to mark this page dirty in the rmap chain */
+ gfn = gpa >> PAGE_SHIFT;
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ memslot = gfn_to_memslot(kvm, gfn);
+ if (memslot) {
+ rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ lock_rmap(rmap);
+ *rmap |= KVMPPC_RMAP_CHANGED;
+ unlock_rmap(rmap);
+ }
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
}
/*
@@ -1193,16 +1245,36 @@ struct kvm_htab_ctx {
#define HPTE_SIZE (2 * sizeof(unsigned long))
+/*
+ * Returns 1 if this HPT entry has been modified or has pending
+ * R/C bit changes.
+ */
+static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
+{
+ unsigned long rcbits_unset;
+
+ if (revp->guest_rpte & HPTE_GR_MODIFIED)
+ return 1;
+
+ /* Also need to consider changes in reference and changed bits */
+ rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
+ if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset))
+ return 1;
+
+ return 0;
+}
+
static long record_hpte(unsigned long flags, unsigned long *hptp,
unsigned long *hpte, struct revmap_entry *revp,
int want_valid, int first_pass)
{
unsigned long v, r;
+ unsigned long rcbits_unset;
int ok = 1;
int valid, dirty;
/* Unmodified entries are uninteresting except on the first pass */
- dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
+ dirty = hpte_dirty(revp, hptp);
if (!first_pass && !dirty)
return 0;
@@ -1223,16 +1295,28 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
cpu_relax();
v = hptp[0];
+
+ /* re-evaluate valid and dirty from synchronized HPTE value */
+ valid = !!(v & HPTE_V_VALID);
+ dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
+
+ /* Harvest R and C into guest view if necessary */
+ rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
+ if (valid && (rcbits_unset & hptp[1])) {
+ revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) |
+ HPTE_GR_MODIFIED;
+ dirty = 1;
+ }
+
if (v & HPTE_V_ABSENT) {
v &= ~HPTE_V_ABSENT;
v |= HPTE_V_VALID;
+ valid = 1;
}
- /* re-evaluate valid and dirty from synchronized HPTE value */
- valid = !!(v & HPTE_V_VALID);
if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
valid = 0;
- r = revp->guest_rpte | (hptp[1] & (HPTE_R_R | HPTE_R_C));
- dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
+
+ r = revp->guest_rpte;
/* only clear modified if this is the right sort of entry */
if (valid == want_valid && dirty) {
r &= ~HPTE_GR_MODIFIED;
@@ -1288,7 +1372,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
/* Skip uninteresting entries, i.e. clean on not-first pass */
if (!first_pass) {
while (i < kvm->arch.hpt_npte &&
- !(revp->guest_rpte & HPTE_GR_MODIFIED)) {
+ !hpte_dirty(revp, hptp)) {
++i;
hptp += 2;
++revp;
@@ -1467,7 +1551,7 @@ static int kvm_htab_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct file_operations kvm_htab_fops = {
+static const struct file_operations kvm_htab_fops = {
.read = kvm_htab_read,
.write = kvm_htab_write,
.llseek = default_llseek,
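
The hpte_dirty() helper introduced above widens what counts as an interesting entry for the HPT dump: either the guest view is already flagged modified, or the hardware copy carries R/C bits the guest view has not absorbed yet. A stand-alone restatement of that test (the bit values are placeholders, not the real HPTE layout):

#include <stdio.h>

#define HPTE_V_VALID		(1UL << 0)	/* placeholder bit positions */
#define HPTE_R_R		(1UL << 1)
#define HPTE_R_C		(1UL << 2)
#define HPTE_GR_MODIFIED	(1UL << 3)

struct revmap_entry_model { unsigned long guest_rpte; };

static int hpte_dirty(struct revmap_entry_model *revp, unsigned long *hptp)
{
	unsigned long rcbits_unset;

	if (revp->guest_rpte & HPTE_GR_MODIFIED)
		return 1;

	/* R/C set by hardware but not yet reflected in the guest view? */
	rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
	if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset))
		return 1;

	return 0;
}

int main(void)
{
	struct revmap_entry_model rev = { .guest_rpte = 0 };
	unsigned long hpte[2] = { HPTE_V_VALID, HPTE_R_C };

	printf("dirty=%d\n", hpte_dirty(&rev, hpte));	/* prints: dirty=1 */
	return 0;
}
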
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 72ffc89..b2d3f3b 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -92,7 +92,7 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct file_operations kvm_spapr_tce_fops = {
+static const struct file_operations kvm_spapr_tce_fops = {
.mmap = kvm_spapr_tce_mmap,
.release = kvm_spapr_tce_release,
};
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 71d0c90..550f592 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -66,6 +66,31 @@
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
+void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ int me;
+ int cpu = vcpu->cpu;
+ wait_queue_head_t *wqp;
+
+ wqp = kvm_arch_vcpu_wq(vcpu);
+ if (waitqueue_active(wqp)) {
+ wake_up_interruptible(wqp);
+ ++vcpu->stat.halt_wakeup;
+ }
+
+ me = get_cpu();
+
+ /* CPU points to the first thread of the core */
+ if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
+ int real_cpu = cpu + vcpu->arch.ptid;
+ if (paca[real_cpu].kvm_hstate.xics_phys)
+ xics_wake_cpu(real_cpu);
+ else if (cpu_online(cpu))
+ smp_send_reschedule(cpu);
+ }
+ put_cpu();
+}
+
/*
* We use the vcpu_load/put functions to measure stolen time.
* Stolen time is counted as time when either the vcpu is able to
@@ -259,7 +284,7 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
len = ((struct reg_vpa *)va)->length.hword;
else
len = ((struct reg_vpa *)va)->length.word;
- kvmppc_unpin_guest_page(kvm, va);
+ kvmppc_unpin_guest_page(kvm, va, vpa, false);
/* Check length */
if (len > nb || len < sizeof(struct reg_vpa))
@@ -359,13 +384,13 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
va = NULL;
nb = 0;
if (gpa)
- va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+ va = kvmppc_pin_guest_page(kvm, gpa, &nb);
spin_lock(&vcpu->arch.vpa_update_lock);
if (gpa == vpap->next_gpa)
break;
/* sigh... unpin that one and try again */
if (va)
- kvmppc_unpin_guest_page(kvm, va);
+ kvmppc_unpin_guest_page(kvm, va, gpa, false);
}
vpap->update_pending = 0;
@@ -375,12 +400,15 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
* has changed the mappings underlying guest memory,
* so unregister the region.
*/
- kvmppc_unpin_guest_page(kvm, va);
+ kvmppc_unpin_guest_page(kvm, va, gpa, false);
va = NULL;
}
if (vpap->pinned_addr)
- kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
+ kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
+ vpap->dirty);
+ vpap->gpa = gpa;
vpap->pinned_addr = va;
+ vpap->dirty = false;
if (va)
vpap->pinned_end = va + vpap->len;
}
@@ -472,6 +500,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
/* order writing *dt vs. writing vpa->dtl_idx */
smp_wmb();
vpa->dtl_idx = ++vcpu->arch.dtl_index;
+ vcpu->arch.dtl.dirty = true;
}
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
@@ -479,7 +508,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
unsigned long req = kvmppc_get_gpr(vcpu, 3);
unsigned long target, ret = H_SUCCESS;
struct kvm_vcpu *tvcpu;
- int idx;
+ int idx, rc;
switch (req) {
case H_ENTER:
@@ -515,6 +544,30 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
break;
+ case H_RTAS:
+ if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+ return RESUME_HOST;
+
+ rc = kvmppc_rtas_hcall(vcpu);
+
+ if (rc == -ENOENT)
+ return RESUME_HOST;
+ else if (rc == 0)
+ break;
+
+ /* Send the error out to userspace via KVM_RUN */
+ return rc;
+
+ case H_XIRR:
+ case H_CPPR:
+ case H_EOI:
+ case H_IPI:
+ case H_IPOLL:
+ case H_XIRR_X:
+ if (kvmppc_xics_enabled(vcpu)) {
+ ret = kvmppc_xics_hcall(vcpu, req);
+ break;
+ } /* fallthrough */
default:
return RESUME_HOST;
}
@@ -913,15 +966,19 @@ out:
return ERR_PTR(err);
}
+static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
+{
+ if (vpa->pinned_addr)
+ kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
+ vpa->dirty);
+}
+
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
spin_lock(&vcpu->arch.vpa_update_lock);
- if (vcpu->arch.dtl.pinned_addr)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
- if (vcpu->arch.slb_shadow.pinned_addr)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
- if (vcpu->arch.vpa.pinned_addr)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
+ unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
+ unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
+ unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
spin_unlock(&vcpu->arch.vpa_update_lock);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -955,7 +1012,6 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
}
extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern void xics_wake_cpu(int cpu);
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
struct kvm_vcpu *vcpu)
@@ -1330,9 +1386,12 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
break;
vc->runner = vcpu;
n_ceded = 0;
- list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
+ list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
if (!v->arch.pending_exceptions)
n_ceded += v->arch.ceded;
+ else
+ v->arch.ceded = 0;
+ }
if (n_ceded == vc->n_runnable)
kvmppc_vcore_blocked(vc);
else
@@ -1483,7 +1542,7 @@ static int kvm_rma_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct file_operations kvm_rma_fops = {
+static const struct file_operations kvm_rma_fops = {
.mmap = kvm_rma_mmap,
.release = kvm_rma_release,
};
@@ -1515,7 +1574,13 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
(*sps)->page_shift = def->shift;
(*sps)->slb_enc = def->sllp;
(*sps)->enc[0].page_shift = def->shift;
- (*sps)->enc[0].pte_enc = def->penc;
+ /*
+ * Only return the base page encoding. We don't want to return
+ * all the supported pte_enc, because our H_ENTER doesn't
+ * support MPSS yet. Once it does, we can start passing all
+ * the supported pte_enc values here.
+ */
+ (*sps)->enc[0].pte_enc = def->penc[linux_psize];
(*sps)++;
}
@@ -1549,7 +1614,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
mutex_lock(&kvm->slots_lock);
r = -EINVAL;
- if (log->slot >= KVM_MEMORY_SLOTS)
+ if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
memslot = id_to_memslot(kvm->memslots, log->slot);
@@ -1639,12 +1704,12 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old)
+ const struct kvm_memory_slot *old)
{
unsigned long npages = mem->memory_size >> PAGE_SHIFT;
struct kvm_memory_slot *memslot;
- if (npages && old.npages) {
+ if (npages && old->npages) {
/*
* If modifying a memslot, reset all the rmap dirty bits.
* If this is a new memslot, we don't need to do anything
@@ -1821,6 +1886,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
cpumask_setall(&kvm->arch.need_tlb_flush);
INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+ INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
kvm->arch.rma = NULL;
@@ -1866,6 +1932,8 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
kvm->arch.rma = NULL;
}
+ kvmppc_rtas_tokens_free(kvm);
+
kvmppc_free_hpt(kvm);
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}
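
One subtle fix in book3s_hv.c above: when deciding whether the whole virtual core can block, a vcpu only counts as ceded if it has no pending exceptions, and a pending exception now clears its ceded flag so the core runs instead of sleeping on work. A toy restatement of that accounting (the types are stand-ins for the runnable-threads list walk):

#include <stdio.h>

struct vcpu_model { int ceded; int pending_exceptions; };

static int count_ceded(struct vcpu_model *v, int n)
{
	int n_ceded = 0;

	for (int i = 0; i < n; i++) {
		if (!v[i].pending_exceptions)
			n_ceded += v[i].ceded;
		else
			v[i].ceded = 0;	/* must not sleep with work pending */
	}
	return n_ceded;
}

int main(void)
{
	struct vcpu_model vc[2] = { { 1, 0 }, { 1, 1 } };

	/* Only the first counts as ceded; the second is forcibly un-ceded. */
	printf("n_ceded=%d ceded[1]=%d\n", count_ceded(vc, 2), vc[1].ceded);
	return 0;
}
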
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 84035a5..37f1cc4 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -122,11 +122,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
+#ifdef CONFIG_SMP
/*
* On PPC970, if the guest vcpu has an external interrupt pending,
* send ourselves an IPI so as to interrupt the guest once it
* enables interrupts. (It must have interrupts disabled,
* otherwise we would already have delivered the interrupt.)
+ *
+ * XXX If this is a UP build, smp_send_reschedule is not available,
+ * so the interrupt will be delayed until the next time the vcpu
+ * enters the guest with interrupts enabled.
*/
BEGIN_FTR_SECTION
ld r0, VCPU_PENDING_EXC(r4)
@@ -141,6 +146,7 @@ BEGIN_FTR_SECTION
mr r4, r31
32:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+#endif /* CONFIG_SMP */
/* Jump to partition switch code */
bl .kvmppc_hv_entry_trampoline
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 19c93ba..6dcbb49 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -97,17 +97,6 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
-/*
- * Note modification of an HPTE; set the HPTE modified bit
- * if anyone is interested.
- */
-static inline void note_hpte_modification(struct kvm *kvm,
- struct revmap_entry *rev)
-{
- if (atomic_read(&kvm->arch.hpte_mod_interest))
- rev->guest_rpte |= HPTE_GR_MODIFIED;
-}
-
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
struct revmap_entry *rev,
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
new file mode 100644
index 0000000..b4b0082
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright 2012 Michael Ellerman, IBM Corporation.
+ * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/debug.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+
+#include "book3s_xics.h"
+
+#define DEBUG_PASSUP
+
+static inline void rm_writeb(unsigned long paddr, u8 val)
+{
+ __asm__ __volatile__("sync; stbcix %0,0,%1"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
+static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu *this_vcpu)
+{
+ struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
+ unsigned long xics_phys;
+ int cpu;
+
+ /* Mark the target VCPU as having an interrupt pending */
+ vcpu->stat.queue_intr++;
+ set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
+
+ /* Kick self ? Just set MER and return */
+ if (vcpu == this_vcpu) {
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
+ return;
+ }
+
+ /* Check if the core is loaded, if not, too hard */
+ cpu = vcpu->cpu;
+ if (cpu < 0 || cpu >= nr_cpu_ids) {
+ this_icp->rm_action |= XICS_RM_KICK_VCPU;
+ this_icp->rm_kick_target = vcpu;
+ return;
+ }
+ /* In SMT, cpu will always point to thread 0; adjust it */
+ cpu += vcpu->arch.ptid;
+
+ /* Not too hard, then poke the target */
+ xics_phys = paca[cpu].kvm_hstate.xics_phys;
+ rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+}
+
+static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
+{
+ /* Note: Only called on self ! */
+ clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
+ &vcpu->arch.pending_exceptions);
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
+}
+
+static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
+ union kvmppc_icp_state old,
+ union kvmppc_icp_state new)
+{
+ struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
+ bool success;
+
+ /* Calculate new output value */
+ new.out_ee = (new.xisr && (new.pending_pri < new.cppr));
+
+ /* Attempt atomic update */
+ success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
+ if (!success)
+ goto bail;
+
+ /*
+ * Check for output state update
+ *
+ * Note that this is racy since another processor could be updating
+ * the state already. This is why we never clear the interrupt output
+ * here, we only ever set it. The clear only happens prior to doing
+ * an update and only by the processor itself. Currently we do it
+ * in Accept (H_XIRR) and Up_Cppr (H_XPPR).
+ *
+ * We also do not try to figure out whether the EE state has changed,
+ * we unconditionally set it if the new state calls for it. The reason
+ * for that is that we opportunistically remove the pending interrupt
+ * flag when raising CPPR, so we need to set it back here if an
+ * interrupt is still pending.
+ */
+ if (new.out_ee)
+ icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);
+
+ /* Expose the state change for debug purposes */
+ this_vcpu->arch.icp->rm_dbgstate = new;
+ this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;
+
+ bail:
+ return success;
+}
+
+static inline int check_too_hard(struct kvmppc_xics *xics,
+ struct kvmppc_icp *icp)
+{
+ return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
+}
+
+static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ u8 new_cppr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ bool resend;
+
+ /*
+ * This handles several related states in one operation:
+ *
+ * ICP State: Down_CPPR
+ *
+ * Load CPPR with new value and if the XISR is 0
+ * then check for resends:
+ *
+ * ICP State: Resend
+ *
+ * If MFRR is more favored than CPPR, check for IPIs
+ * and notify ICS of a potential resend. This is done
+ * asynchronously (when used in real mode, we will have
+ * to exit here).
+ *
+ * We do not handle the complete Check_IPI as documented
+ * here. In the PAPR, this state will be used for both
+ * Set_MFRR and Down_CPPR. However, we know that we aren't
+ * changing the MFRR state here so we don't need to handle
+ * the case of an MFRR causing a reject of a pending irq,
+ * this will have been handled when the MFRR was set in the
+ * first place.
+ *
+ * Thus we don't have to handle rejects, only resends.
+ *
+ * When implementing real mode for HV KVM, resend will lead to
+ * a H_TOO_HARD return and the whole transaction will be handled
+ * in virtual mode.
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ /* Down_CPPR */
+ new_state.cppr = new_cppr;
+
+ /*
+ * Cut down Resend / Check_IPI / IPI
+ *
+ * The logic is that we cannot have a pending interrupt
+ * trumped by an IPI at this point (see above), so we
+ * know that either the pending interrupt is already an
+ * IPI (in which case we don't care to override it) or
+ * it's either more favored than us or non-existent
+ */
+ if (new_state.mfrr < new_cppr &&
+ new_state.mfrr <= new_state.pending_pri) {
+ new_state.pending_pri = new_state.mfrr;
+ new_state.xisr = XICS_IPI;
+ }
+
+ /* Latch/clear resend bit */
+ resend = new_state.need_resend;
+ new_state.need_resend = 0;
+
+ } while (!icp_rm_try_update(icp, old_state, new_state));
+
+ /*
+ * Now handle resend checks. Those are asynchronous to the ICP
+ * state update in HW (ie bus transactions) so we can handle them
+ * separately here as well.
+ */
+ if (resend)
+ icp->rm_action |= XICS_RM_CHECK_RESEND;
+}
+
+
+unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ u32 xirr;
+
+ if (!xics || !xics->real_mode)
+ return H_TOO_HARD;
+
+ /* First clear the interrupt */
+ icp_rm_clr_vcpu_irq(icp->vcpu);
+
+ /*
+ * ICP State: Accept_Interrupt
+ *
+ * Return the pending interrupt (if any) along with the
+ * current CPPR, then clear the XISR & set CPPR to the
+ * pending priority
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
+ if (!old_state.xisr)
+ break;
+ new_state.cppr = new_state.pending_pri;
+ new_state.pending_pri = 0xff;
+ new_state.xisr = 0;
+
+ } while (!icp_rm_try_update(icp, old_state, new_state));
+
+ /* Return the result in GPR4 */
+ vcpu->arch.gpr[4] = xirr;
+
+ return check_too_hard(xics, icp);
+}
+
+int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
+ u32 reject;
+ bool resend;
+ bool local;
+
+ if (!xics || !xics->real_mode)
+ return H_TOO_HARD;
+
+ local = this_icp->server_num == server;
+ if (local)
+ icp = this_icp;
+ else
+ icp = kvmppc_xics_find_server(vcpu->kvm, server);
+ if (!icp)
+ return H_PARAMETER;
+
+ /*
+ * ICP state: Set_MFRR
+ *
+ * If the CPPR is more favored than the new MFRR, then
+ * nothing needs to be done as there can be no XISR to
+ * reject.
+ *
+ * If the CPPR is less favored, then we might be replacing
+ * an interrupt, and thus need to possibly reject it as in
+ *
+ * ICP state: Check_IPI
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ /* Set_MFRR */
+ new_state.mfrr = mfrr;
+
+ /* Check_IPI */
+ reject = 0;
+ resend = false;
+ if (mfrr < new_state.cppr) {
+ /* Reject a pending interrupt if not an IPI */
+ if (mfrr <= new_state.pending_pri)
+ reject = new_state.xisr;
+ new_state.pending_pri = mfrr;
+ new_state.xisr = XICS_IPI;
+ }
+
+ if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
+ resend = new_state.need_resend;
+ new_state.need_resend = 0;
+ }
+ } while (!icp_rm_try_update(icp, old_state, new_state));
+
+ /* Pass rejects to virtual mode */
+ if (reject && reject != XICS_IPI) {
+ this_icp->rm_action |= XICS_RM_REJECT;
+ this_icp->rm_reject = reject;
+ }
+
+ /* Pass resends to virtual mode */
+ if (resend)
+ this_icp->rm_action |= XICS_RM_CHECK_RESEND;
+
+ return check_too_hard(xics, this_icp);
+}
+
+int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ u32 reject;
+
+ if (!xics || !xics->real_mode)
+ return H_TOO_HARD;
+
+ /*
+ * ICP State: Set_CPPR
+ *
+ * We can safely compare the new value with the current
+ * value outside of the transaction as the CPPR is only
+ * ever changed by the processor on itself
+ */
+ if (cppr > icp->state.cppr) {
+ icp_rm_down_cppr(xics, icp, cppr);
+ goto bail;
+ } else if (cppr == icp->state.cppr)
+ return H_SUCCESS;
+
+ /*
+ * ICP State: Up_CPPR
+ *
+ * The processor is raising its priority, this can result
+ * in a rejection of a pending interrupt:
+ *
+ * ICP State: Reject_Current
+ *
+ * We can remove EE from the current processor, the update
+ * transaction will set it again if needed
+ */
+ icp_rm_clr_vcpu_irq(icp->vcpu);
+
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ reject = 0;
+ new_state.cppr = cppr;
+
+ if (cppr <= new_state.pending_pri) {
+ reject = new_state.xisr;
+ new_state.xisr = 0;
+ new_state.pending_pri = 0xff;
+ }
+
+ } while (!icp_rm_try_update(icp, old_state, new_state));
+
+ /* Pass rejects to virtual mode */
+ if (reject && reject != XICS_IPI) {
+ icp->rm_action |= XICS_RM_REJECT;
+ icp->rm_reject = reject;
+ }
+ bail:
+ return check_too_hard(xics, icp);
+}
+
+int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u32 irq = xirr & 0x00ffffff;
+ u16 src;
+
+ if (!xics || !xics->real_mode)
+ return H_TOO_HARD;
+
+ /*
+ * ICP State: EOI
+ *
+ * Note: If EOI is incorrectly used by SW to lower the CPPR
+ * value (ie more favored), we do not check for rejection of
+ * a pending interrupt, this is a SW error and PAPR specifies
+ * that we don't have to deal with it.
+ *
+ * The sending of an EOI to the ICS is handled after the
+ * CPPR update
+ *
+ * ICP State: Down_CPPR which we handle
+ * in a separate function as it's shared with H_CPPR.
+ */
+ icp_rm_down_cppr(xics, icp, xirr >> 24);
+
+ /* IPIs have no EOI */
+ if (irq == XICS_IPI)
+ goto bail;
+ /*
+ * EOI handling: If the interrupt is still asserted, we need to
+ * resend it. We can take a lockless "peek" at the ICS state here.
+ *
+ * "Message" interrupts will never have "asserted" set
+ */
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics)
+ goto bail;
+ state = &ics->irq_state[src];
+
+ /* Still asserted, resend it, we make it look like a reject */
+ if (state->asserted) {
+ icp->rm_action |= XICS_RM_REJECT;
+ icp->rm_reject = irq;
+ }
+ bail:
+ return check_too_hard(xics, icp);
+}
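
All of the real-mode handlers above share the same lockless ICP update pattern: snapshot the packed state, modify a local copy, and retry the whole transaction if the compare-and-swap loses a race. A minimal sketch of that loop, assuming only the union kvmppc_icp_state, ACCESS_ONCE() and cmpxchg64() already used by this series (the helper name and the single-field update are illustrative, not part of the patch):

static void icp_update_sketch(union kvmppc_icp_state *state, u8 new_cppr)
{
	union kvmppc_icp_state old, new;

	do {
		old = new = ACCESS_ONCE(*state);	/* snapshot the 64-bit state */
		new.cppr = new_cppr;			/* modify the local copy only */
	} while (cmpxchg64(&state->raw, old.raw, new.raw) != old.raw);
}

Anything that must survive a retry (such as the latched need_resend flag) is recomputed from the fresh snapshot on each iteration, which is why the handlers keep all their decisions inside the do/while body.
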
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 10b6c35..b02f91e 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -79,10 +79,6 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
* *
*****************************************************************************/
-#define XICS_XIRR 4
-#define XICS_QIRR 0xc
-#define XICS_IPI 2 /* interrupt source # for IPIs */
-
/*
* We come in here when wakened from nap mode on a secondary hw thread.
* Relocation is off and most register values are lost.
@@ -101,50 +97,51 @@ kvm_start_guest:
li r0,1
stb r0,PACA_NAPSTATELOST(r13)
- /* get vcpu pointer, NULL if we have no vcpu to run */
- ld r4,HSTATE_KVM_VCPU(r13)
- cmpdi cr1,r4,0
+ /* were we napping due to cede? */
+ lbz r0,HSTATE_NAPPING(r13)
+ cmpwi r0,0
+ bne kvm_end_cede
+
+ /*
+ * We weren't napping due to cede, so this must be a secondary
+ * thread being woken up to run a guest, or being woken up due
+ * to a stray IPI. (Or due to some machine check or hypervisor
+ * maintenance interrupt while the core is in KVM.)
+ */
/* Check the wake reason in SRR1 to see why we got here */
mfspr r3,SPRN_SRR1
rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
cmpwi r3,4 /* was it an external interrupt? */
- bne 27f
-
- /*
- * External interrupt - for now assume it is an IPI, since we
- * should never get any other interrupts sent to offline threads.
- * Only do this for secondary threads.
- */
- beq cr1,25f
- lwz r3,VCPU_PTID(r4)
- cmpwi r3,0
- beq 27f
-25: ld r5,HSTATE_XICS_PHYS(r13)
- li r0,0xff
- li r6,XICS_QIRR
- li r7,XICS_XIRR
+ bne 27f /* if not */
+ ld r5,HSTATE_XICS_PHYS(r13)
+ li r7,XICS_XIRR /* if it was an external interrupt, */
lwzcix r8,r5,r7 /* get and ack the interrupt */
sync
clrldi. r9,r8,40 /* get interrupt source ID. */
- beq 27f /* none there? */
- cmpwi r9,XICS_IPI
- bne 26f
+ beq 28f /* none there? */
+ cmpwi r9,XICS_IPI /* was it an IPI? */
+ bne 29f
+ li r0,0xff
+ li r6,XICS_MFRR
stbcix r0,r5,r6 /* clear IPI */
-26: stwcix r8,r5,r7 /* EOI the interrupt */
-
-27: /* XXX should handle hypervisor maintenance interrupts etc. here */
+ stwcix r8,r5,r7 /* EOI the interrupt */
+ sync /* order loading of vcpu after that */
- /* reload vcpu pointer after clearing the IPI */
+ /* get vcpu pointer, NULL if we have no vcpu to run */
ld r4,HSTATE_KVM_VCPU(r13)
cmpdi r4,0
/* if we have no vcpu to run, go back to sleep */
beq kvm_no_guest
+ b kvmppc_hv_entry
- /* were we napping due to cede? */
- lbz r0,HSTATE_NAPPING(r13)
- cmpwi r0,0
- bne kvm_end_cede
+27: /* XXX should handle hypervisor maintenance interrupts etc. here */
+ b kvm_no_guest
+28: /* SRR1 said external but ICP said nope?? */
+ b kvm_no_guest
+29: /* External non-IPI interrupt to offline secondary thread? help?? */
+ stw r8,HSTATE_SAVED_XIRR(r13)
+ b kvm_no_guest
.global kvmppc_hv_entry
kvmppc_hv_entry:
@@ -260,6 +257,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
lwz r5, LPPACA_YIELDCOUNT(r3)
addi r5, r5, 1
stw r5, LPPACA_YIELDCOUNT(r3)
+ li r6, 1
+ stb r6, VCPU_VPA_DIRTY(r4)
25:
/* Load up DAR and DSISR */
ld r5, VCPU_DAR(r4)
@@ -485,20 +484,20 @@ toc_tlbie_lock:
mtctr r6
mtxer r7
+ ld r10, VCPU_PC(r4)
+ ld r11, VCPU_MSR(r4)
kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
ld r6, VCPU_SRR0(r4)
ld r7, VCPU_SRR1(r4)
- ld r10, VCPU_PC(r4)
- ld r11, VCPU_MSR(r4) /* r11 = vcpu->arch.msr & ~MSR_HV */
+ /* r11 = vcpu->arch.msr & ~MSR_HV */
rldicl r11, r11, 63 - MSR_HV_LG, 1
rotldi r11, r11, 1 + MSR_HV_LG
ori r11, r11, MSR_ME
/* Check if we can deliver an external or decrementer interrupt now */
ld r0,VCPU_PENDING_EXC(r4)
- li r8,(1 << BOOK3S_IRQPRIO_EXTERNAL)
- oris r8,r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
+ lis r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
and r0,r0,r8
cmpdi cr1,r0,0
andi. r0,r11,MSR_EE
@@ -526,10 +525,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Move SRR0 and SRR1 into the respective regs */
5: mtspr SPRN_SRR0, r6
mtspr SPRN_SRR1, r7
- li r0,0
- stb r0,VCPU_CEDED(r4) /* cancel cede */
fast_guest_return:
+ li r0,0
+ stb r0,VCPU_CEDED(r4) /* cancel cede */
mtspr SPRN_HSRR0,r10
mtspr SPRN_HSRR1,r11
@@ -539,6 +538,11 @@ fast_guest_return:
/* Enter guest */
+BEGIN_FTR_SECTION
+ ld r5, VCPU_CFAR(r4)
+ mtspr SPRN_CFAR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+
ld r5, VCPU_LR(r4)
lwz r6, VCPU_CR(r4)
mtlr r5
@@ -604,6 +608,10 @@ kvmppc_interrupt:
lwz r4, HSTATE_SCRATCH1(r13)
std r3, VCPU_GPR(R12)(r9)
stw r4, VCPU_CR(r9)
+BEGIN_FTR_SECTION
+ ld r3, HSTATE_CFAR(r13)
+ std r3, VCPU_CFAR(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
/* Restore R1/R2 so we can handle faults */
ld r1, HSTATE_HOST_R1(r13)
@@ -667,17 +675,99 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
beq hcall_try_real_mode
- /* Check for mediated interrupts (could be done earlier really ...) */
+ /* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
- cmpwi r12,BOOK3S_INTERRUPT_EXTERNAL
- bne+ 1f
- andi. r0,r11,MSR_EE
- beq 1f
- mfspr r5,SPRN_LPCR
- andi. r0,r5,LPCR_MER
- bne bounce_ext_interrupt
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ b ext_interrupt_to_host
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
+
+ /* External interrupt ? */
+ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
+ bne+ ext_interrupt_to_host
+
+ /* External interrupt, first check for host_ipi. If this is
+ * set, we know the host wants us out so let's do it now
+ */
+do_ext_interrupt:
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ bne ext_interrupt_to_host
+
+ /* Now read the interrupt from the ICP */
+ ld r5, HSTATE_XICS_PHYS(r13)
+ li r7, XICS_XIRR
+ cmpdi r5, 0
+ beq- ext_interrupt_to_host
+ lwzcix r3, r5, r7
+ rlwinm. r0, r3, 0, 0xffffff
+ sync
+ beq 3f /* if nothing pending in the ICP */
+
+ /* We found something in the ICP...
+ *
+ * If it's not an IPI, stash it in the PACA and return to
+ * the host, we don't (yet) handle directing real external
+ * interrupts directly to the guest
+ */
+ cmpwi r0, XICS_IPI
+ bne ext_stash_for_host
+
+ /* It's an IPI, clear the MFRR and EOI it */
+ li r0, 0xff
+ li r6, XICS_MFRR
+ stbcix r0, r5, r6 /* clear the IPI */
+ stwcix r3, r5, r7 /* EOI it */
+ sync
+
+ /* We need to re-check host IPI now in case it got set in the
+ * meantime. If it's clear, we bounce the interrupt to the
+ * guest
+ */
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ bne- 1f
+
+ /* All right, looks like an IPI for the guest, we need to set MER */
+3:
+ /* Check if any CPU is heading out to the host, if so head out too */
+ ld r5, HSTATE_KVM_VCORE(r13)
+ lwz r0, VCORE_ENTRY_EXIT(r5)
+ cmpwi r0, 0x100
+ bge ext_interrupt_to_host
+
+ /* See if there is a pending interrupt for the guest */
+ mfspr r8, SPRN_LPCR
+ ld r0, VCPU_PENDING_EXC(r9)
+ /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
+ rldicl. r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
+ rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
+ beq 2f
+
+ /* And if the guest EE is set, we can deliver immediately, else
+ * we return to the guest with MER set
+ */
+ andi. r0, r11, MSR_EE
+ beq 2f
+ mtspr SPRN_SRR0, r10
+ mtspr SPRN_SRR1, r11
+ li r10, BOOK3S_INTERRUPT_EXTERNAL
+ li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
+ rotldi r11, r11, 63
+2: mr r4, r9
+ mtspr SPRN_LPCR, r8
+ b fast_guest_return
+
+ /* We raced with the host, we need to resend that IPI, bummer */
+1: li r0, IPI_PRIORITY
+ stbcix r0, r5, r6 /* set the IPI */
+ sync
+ b ext_interrupt_to_host
+
+ext_stash_for_host:
+ /* It's not an IPI and it's for the host, stash it in the PACA
+ * before exit, it will be picked up by the host ICP driver
+ */
+ stw r3, HSTATE_SAVED_XIRR(r13)
+ext_interrupt_to_host:
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
/* Save DEC */
@@ -820,7 +910,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
beq 44f
ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
li r0,IPI_PRIORITY
- li r7,XICS_QIRR
+ li r7,XICS_MFRR
stbcix r0,r7,r8 /* trigger the IPI */
44: srdi. r3,r3,1
addi r6,r6,PACA_SIZE
@@ -1009,6 +1099,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
lwz r3, LPPACA_YIELDCOUNT(r8)
addi r3, r3, 1
stw r3, LPPACA_YIELDCOUNT(r8)
+ li r3, 1
+ stb r3, VCPU_VPA_DIRTY(r9)
25:
/* Save PMU registers if requested */
/* r8 and cr0.eq are live here */
@@ -1341,11 +1433,19 @@ hcall_real_table:
.long 0 /* 0x58 */
.long 0 /* 0x5c */
.long 0 /* 0x60 */
- .long 0 /* 0x64 */
- .long 0 /* 0x68 */
- .long 0 /* 0x6c */
- .long 0 /* 0x70 */
- .long 0 /* 0x74 */
+#ifdef CONFIG_KVM_XICS
+ .long .kvmppc_rm_h_eoi - hcall_real_table
+ .long .kvmppc_rm_h_cppr - hcall_real_table
+ .long .kvmppc_rm_h_ipi - hcall_real_table
+ .long 0 /* 0x70 - H_IPOLL */
+ .long .kvmppc_rm_h_xirr - hcall_real_table
+#else
+ .long 0 /* 0x64 - H_EOI */
+ .long 0 /* 0x68 - H_CPPR */
+ .long 0 /* 0x6c - H_IPI */
+ .long 0 /* 0x70 - H_IPOLL */
+ .long 0 /* 0x74 - H_XIRR */
+#endif
.long 0 /* 0x78 */
.long 0 /* 0x7c */
.long 0 /* 0x80 */
@@ -1396,15 +1496,6 @@ ignore_hdec:
mr r4,r9
b fast_guest_return
-bounce_ext_interrupt:
- mr r4,r9
- mtspr SPRN_SRR0,r10
- mtspr SPRN_SRR1,r11
- li r10,BOOK3S_INTERRUPT_EXTERNAL
- li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11,r11,63
- b fast_guest_return
-
_GLOBAL(kvmppc_h_set_dabr)
std r4,VCPU_DABR(r3)
/* Work around P7 bug where DABR can get corrupted on mtspr */
@@ -1510,6 +1601,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
b .
kvm_end_cede:
+ /* get vcpu pointer */
+ ld r4, HSTATE_KVM_VCPU(r13)
+
/* Woken by external or decrementer interrupt */
ld r1, HSTATE_HOST_R1(r13)
@@ -1549,6 +1643,16 @@ kvm_end_cede:
li r0,0
stb r0,HSTATE_NAPPING(r13)
+ /* Check the wake reason in SRR1 to see why we got here */
+ mfspr r3, SPRN_SRR1
+ rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */
+ cmpwi r3, 4 /* was it an external interrupt? */
+ li r12, BOOK3S_INTERRUPT_EXTERNAL
+ mr r9, r4
+ ld r10, VCPU_PC(r9)
+ ld r11, VCPU_MSR(r9)
+ beq do_ext_interrupt /* if so */
+
/* see if any other thread is already exiting */
lwz r0,VCORE_ENTRY_EXIT(r5)
cmpwi r0,0x100
@@ -1568,8 +1672,7 @@ kvm_cede_prodded:
/* we've ceded but we want to give control to the host */
kvm_cede_exit:
- li r3,H_TOO_HARD
- blr
+ b hcall_real_fallback
/* Try to handle a machine check in real mode */
machine_check_realmode:
@@ -1617,7 +1720,7 @@ secondary_nap:
beq 37f
sync
li r0, 0xff
- li r6, XICS_QIRR
+ li r6, XICS_MFRR
stbcix r0, r5, r6 /* clear the IPI */
stwcix r3, r5, r7 /* EOI it */
37: sync
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 2c86b0d..da8b13c 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -124,7 +124,6 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hpte_cache *pte;
- struct hlist_node *node;
int i;
rcu_read_lock();
@@ -132,7 +131,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
- hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+ hlist_for_each_entry_rcu(pte, list, list_vpte_long)
invalidate_pte(vcpu, pte);
}
@@ -143,7 +142,6 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
- struct hlist_node *node;
struct hpte_cache *pte;
/* Find the list of entries in the map */
@@ -152,7 +150,7 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
rcu_read_lock();
/* Check the list for matching entries and invalidate */
- hlist_for_each_entry_rcu(pte, node, list, list_pte)
+ hlist_for_each_entry_rcu(pte, list, list_pte)
if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
invalidate_pte(vcpu, pte);
@@ -163,7 +161,6 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
- struct hlist_node *node;
struct hpte_cache *pte;
/* Find the list of entries in the map */
@@ -173,7 +170,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
rcu_read_lock();
/* Check the list for matching entries and invalidate */
- hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
+ hlist_for_each_entry_rcu(pte, list, list_pte_long)
if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
invalidate_pte(vcpu, pte);
@@ -207,7 +204,6 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
- struct hlist_node *node;
struct hpte_cache *pte;
u64 vp_mask = 0xfffffffffULL;
@@ -216,7 +212,7 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
rcu_read_lock();
/* Check the list for matching entries and invalidate */
- hlist_for_each_entry_rcu(pte, node, list, list_vpte)
+ hlist_for_each_entry_rcu(pte, list, list_vpte)
if ((pte->pte.vpage & vp_mask) == guest_vp)
invalidate_pte(vcpu, pte);
@@ -228,7 +224,6 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
- struct hlist_node *node;
struct hpte_cache *pte;
u64 vp_mask = 0xffffff000ULL;
@@ -238,7 +233,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
rcu_read_lock();
/* Check the list for matching entries and invalidate */
- hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+ hlist_for_each_entry_rcu(pte, list, list_vpte_long)
if ((pte->pte.vpage & vp_mask) == guest_vp)
invalidate_pte(vcpu, pte);
@@ -266,7 +261,6 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
- struct hlist_node *node;
struct hpte_cache *pte;
int i;
@@ -277,7 +271,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
- hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+ hlist_for_each_entry_rcu(pte, list, list_vpte_long)
if ((pte->pte.raddr >= pa_start) &&
(pte->pte.raddr < pa_end))
invalidate_pte(vcpu, pte);
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 36bf041..e61e39e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -34,6 +34,8 @@
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
+#include <asm/firmware.h>
+#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
@@ -1032,7 +1034,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
if (!vcpu_book3s)
goto out;
- vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
+ vcpu_book3s->shadow_vcpu =
kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
if (!vcpu_book3s->shadow_vcpu)
goto free_vcpu;
@@ -1273,7 +1275,7 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old)
+ const struct kvm_memory_slot *old)
{
}
@@ -1281,12 +1283,22 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}
+static unsigned int kvm_global_user_count = 0;
+static DEFINE_SPINLOCK(kvm_global_user_count_lock);
+
int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+ INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ spin_lock(&kvm_global_user_count_lock);
+ if (++kvm_global_user_count == 1)
+ pSeries_disable_reloc_on_exc();
+ spin_unlock(&kvm_global_user_count_lock);
+ }
return 0;
}
@@ -1295,6 +1307,14 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
#ifdef CONFIG_PPC64
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
+
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ spin_lock(&kvm_global_user_count_lock);
+ BUG_ON(kvm_global_user_count == 0);
+ if (--kvm_global_user_count == 0)
+ pSeries_enable_reloc_on_exc();
+ spin_unlock(&kvm_global_user_count_lock);
+ }
}
static int kvmppc_book3s_init(void)
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index ee02b30..da0e0bc 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -227,6 +227,13 @@ static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
+{
+ long rc = kvmppc_xics_hcall(vcpu, cmd);
+ kvmppc_set_gpr(vcpu, 3, rc);
+ return EMULATE_DONE;
+}
+
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
switch (cmd) {
@@ -246,6 +253,22 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++;
return EMULATE_DONE;
+ case H_XIRR:
+ case H_CPPR:
+ case H_EOI:
+ case H_IPI:
+ case H_IPOLL:
+ case H_XIRR_X:
+ if (kvmppc_xics_enabled(vcpu))
+ return kvmppc_h_pr_xics_hcall(vcpu, cmd);
+ break;
+ case H_RTAS:
+ if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+ return RESUME_HOST;
+ if (kvmppc_rtas_hcall(vcpu))
+ break;
+ kvmppc_set_gpr(vcpu, 3, 0);
+ return EMULATE_DONE;
}
return EMULATE_FAIL;
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
new file mode 100644
index 0000000..3219ba8
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2012 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <linux/err.h>
+
+#include <asm/uaccess.h>
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/rtas.h>
+
+#ifdef CONFIG_KVM_XICS
+static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
+{
+ u32 irq, server, priority;
+ int rc;
+
+ if (args->nargs != 3 || args->nret != 1) {
+ rc = -3;
+ goto out;
+ }
+
+ irq = args->args[0];
+ server = args->args[1];
+ priority = args->args[2];
+
+ rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
+ if (rc)
+ rc = -3;
+out:
+ args->rets[0] = rc;
+}
+
+static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
+{
+ u32 irq, server, priority;
+ int rc;
+
+ if (args->nargs != 1 || args->nret != 3) {
+ rc = -3;
+ goto out;
+ }
+
+ irq = args->args[0];
+
+ server = priority = 0;
+ rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
+ if (rc) {
+ rc = -3;
+ goto out;
+ }
+
+ args->rets[1] = server;
+ args->rets[2] = priority;
+out:
+ args->rets[0] = rc;
+}
+
+static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
+{
+ u32 irq;
+ int rc;
+
+ if (args->nargs != 1 || args->nret != 1) {
+ rc = -3;
+ goto out;
+ }
+
+ irq = args->args[0];
+
+ rc = kvmppc_xics_int_off(vcpu->kvm, irq);
+ if (rc)
+ rc = -3;
+out:
+ args->rets[0] = rc;
+}
+
+static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
+{
+ u32 irq;
+ int rc;
+
+ if (args->nargs != 1 || args->nret != 1) {
+ rc = -3;
+ goto out;
+ }
+
+ irq = args->args[0];
+
+ rc = kvmppc_xics_int_on(vcpu->kvm, irq);
+ if (rc)
+ rc = -3;
+out:
+ args->rets[0] = rc;
+}
+#endif /* CONFIG_KVM_XICS */
+
+struct rtas_handler {
+ void (*handler)(struct kvm_vcpu *vcpu, struct rtas_args *args);
+ char *name;
+};
+
+static struct rtas_handler rtas_handlers[] = {
+#ifdef CONFIG_KVM_XICS
+ { .name = "ibm,set-xive", .handler = kvm_rtas_set_xive },
+ { .name = "ibm,get-xive", .handler = kvm_rtas_get_xive },
+ { .name = "ibm,int-off", .handler = kvm_rtas_int_off },
+ { .name = "ibm,int-on", .handler = kvm_rtas_int_on },
+#endif
+};
+
+struct rtas_token_definition {
+ struct list_head list;
+ struct rtas_handler *handler;
+ u64 token;
+};
+
+static int rtas_name_matches(char *s1, char *s2)
+{
+ struct kvm_rtas_token_args args;
+ return !strncmp(s1, s2, sizeof(args.name));
+}
+
+static int rtas_token_undefine(struct kvm *kvm, char *name)
+{
+ struct rtas_token_definition *d, *tmp;
+
+ lockdep_assert_held(&kvm->lock);
+
+ list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
+ if (rtas_name_matches(d->handler->name, name)) {
+ list_del(&d->list);
+ kfree(d);
+ return 0;
+ }
+ }
+
+ /* It's not an error to undefine an undefined token */
+ return 0;
+}
+
+static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
+{
+ struct rtas_token_definition *d;
+ struct rtas_handler *h = NULL;
+ bool found;
+ int i;
+
+ lockdep_assert_held(&kvm->lock);
+
+ list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
+ if (d->token == token)
+ return -EEXIST;
+ }
+
+ found = false;
+ for (i = 0; i < ARRAY_SIZE(rtas_handlers); i++) {
+ h = &rtas_handlers[i];
+ if (rtas_name_matches(h->name, name)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENOENT;
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->handler = h;
+ d->token = token;
+
+ list_add_tail(&d->list, &kvm->arch.rtas_tokens);
+
+ return 0;
+}
+
+int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
+{
+ struct kvm_rtas_token_args args;
+ int rc;
+
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+ mutex_lock(&kvm->lock);
+
+ if (args.token)
+ rc = rtas_token_define(kvm, args.name, args.token);
+ else
+ rc = rtas_token_undefine(kvm, args.name);
+
+ mutex_unlock(&kvm->lock);
+
+ return rc;
+}
+
+int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
+{
+ struct rtas_token_definition *d;
+ struct rtas_args args;
+ rtas_arg_t *orig_rets;
+ gpa_t args_phys;
+ int rc;
+
+ /* r4 contains the guest physical address of the RTAS args */
+ args_phys = kvmppc_get_gpr(vcpu, 4);
+
+ rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
+ if (rc)
+ goto fail;
+
+ /*
+ * args->rets is a pointer into args->args. Now that we've
+ * copied args we need to fix it up to point into our copy,
+ * not the guest args. We also need to save the original
+ * value so we can restore it on the way out.
+ */
+ orig_rets = args.rets;
+ args.rets = &args.args[args.nargs];
+
+ mutex_lock(&vcpu->kvm->lock);
+
+ rc = -ENOENT;
+ list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
+ if (d->token == args.token) {
+ d->handler->handler(vcpu, &args);
+ rc = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&vcpu->kvm->lock);
+
+ if (rc == 0) {
+ args.rets = orig_rets;
+ rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
+ if (rc)
+ goto fail;
+ }
+
+ return rc;
+
+fail:
+ /*
+ * We only get here if the guest has called RTAS with a bogus
+ * args pointer. That means we can't get to the args, and so we
+ * can't fail the RTAS call. So fail right out to userspace,
+ * which should kill the guest.
+ */
+ return rc;
+}
+
+void kvmppc_rtas_tokens_free(struct kvm *kvm)
+{
+ struct rtas_token_definition *d, *tmp;
+
+ lockdep_assert_held(&kvm->lock);
+
+ list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
+ list_del(&d->list);
+ kfree(d);
+ }
+}
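
The token list managed above is populated from userspace rather than from the kernel. A rough sketch of how a VMM might bind the "ibm,set-xive" service to a token with the KVM_PPC_RTAS_DEFINE_TOKEN ioctl that accompanies this code (vm_fd, the token value and the error handling are illustrative; the name and token fields follow struct kvm_rtas_token_args as used in kvm_vm_ioctl_rtas_define_token() above):

	struct kvm_rtas_token_args args = { .token = 0x2001 };	/* example token */

	strncpy(args.name, "ibm,set-xive", sizeof(args.name));
	if (ioctl(vm_fd, KVM_PPC_RTAS_DEFINE_TOKEN, &args) < 0)
		perror("KVM_PPC_RTAS_DEFINE_TOKEN");

Passing token == 0 for the same name undefines it again, as kvm_vm_ioctl_rtas_define_token() shows.
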
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
new file mode 100644
index 0000000..94c1dd4
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -0,0 +1,1299 @@
+/*
+ * Copyright 2012 Michael Ellerman, IBM Corporation.
+ * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/anon_inodes.h>
+
+#include <asm/uaccess.h>
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/debug.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "book3s_xics.h"
+
+#if 1
+#define XICS_DBG(fmt...) do { } while (0)
+#else
+#define XICS_DBG(fmt...) trace_printk(fmt)
+#endif
+
+#define ENABLE_REALMODE true
+#define DEBUG_REALMODE false
+
+/*
+ * LOCKING
+ * =======
+ *
+ * Each ICS has a mutex protecting the information about the IRQ
+ * sources and avoiding simultaneous deliveries of the same interrupt.
+ *
+ * ICP operations are done via a single compare & swap transaction
+ * (most ICP state fits in the union kvmppc_icp_state)
+ */
+
+/*
+ * TODO
+ * ====
+ *
+ * - To speed up resends, keep a bitmap of "resend" set bits in the
+ * ICS
+ *
+ * - Speed up server# -> ICP lookup (array ? hash table ?)
+ *
+ * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
+ * locks array to improve scalability
+ */
+
+/* -- ICS routines -- */
+
+static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ u32 new_irq);
+
+static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level,
+ bool report_status)
+{
+ struct ics_irq_state *state;
+ struct kvmppc_ics *ics;
+ u16 src;
+
+ XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);
+
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics) {
+ XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
+ return -EINVAL;
+ }
+ state = &ics->irq_state[src];
+ if (!state->exists)
+ return -EINVAL;
+
+ if (report_status)
+ return state->asserted;
+
+ /*
+ * We set state->asserted locklessly. This should be fine as
+ * we are the only setter, thus concurrent access is undefined
+ * to begin with.
+ */
+ if (level == KVM_INTERRUPT_SET_LEVEL)
+ state->asserted = 1;
+ else if (level == KVM_INTERRUPT_UNSET) {
+ state->asserted = 0;
+ return 0;
+ }
+
+ /* Attempt delivery */
+ icp_deliver_irq(xics, NULL, irq);
+
+ return state->asserted;
+}
+
+static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
+ struct kvmppc_icp *icp)
+{
+ int i;
+
+ mutex_lock(&ics->lock);
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ struct ics_irq_state *state = &ics->irq_state[i];
+
+ if (!state->resend)
+ continue;
+
+ XICS_DBG("resend %#x prio %#x\n", state->number,
+ state->priority);
+
+ mutex_unlock(&ics->lock);
+ icp_deliver_irq(xics, icp, state->number);
+ mutex_lock(&ics->lock);
+ }
+
+ mutex_unlock(&ics->lock);
+}
+
+static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
+ struct ics_irq_state *state,
+ u32 server, u32 priority, u32 saved_priority)
+{
+ bool deliver;
+
+ mutex_lock(&ics->lock);
+
+ state->server = server;
+ state->priority = priority;
+ state->saved_priority = saved_priority;
+ deliver = false;
+ if ((state->masked_pending || state->resend) && priority != MASKED) {
+ state->masked_pending = 0;
+ deliver = true;
+ }
+
+ mutex_unlock(&ics->lock);
+
+ return deliver;
+}
+
+int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+ struct kvmppc_icp *icp;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u16 src;
+
+ if (!xics)
+ return -ENODEV;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics)
+ return -EINVAL;
+ state = &ics->irq_state[src];
+
+ icp = kvmppc_xics_find_server(kvm, server);
+ if (!icp)
+ return -EINVAL;
+
+ XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
+ irq, server, priority,
+ state->masked_pending, state->resend);
+
+ if (write_xive(xics, ics, state, server, priority, priority))
+ icp_deliver_irq(xics, icp, irq);
+
+ return 0;
+}
+
+int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u16 src;
+
+ if (!xics)
+ return -ENODEV;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics)
+ return -EINVAL;
+ state = &ics->irq_state[src];
+
+ mutex_lock(&ics->lock);
+ *server = state->server;
+ *priority = state->priority;
+ mutex_unlock(&ics->lock);
+
+ return 0;
+}
+
+int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+ struct kvmppc_icp *icp;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u16 src;
+
+ if (!xics)
+ return -ENODEV;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics)
+ return -EINVAL;
+ state = &ics->irq_state[src];
+
+ icp = kvmppc_xics_find_server(kvm, state->server);
+ if (!icp)
+ return -EINVAL;
+
+ if (write_xive(xics, ics, state, state->server, state->saved_priority,
+ state->saved_priority))
+ icp_deliver_irq(xics, icp, irq);
+
+ return 0;
+}
+
+int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u16 src;
+
+ if (!xics)
+ return -ENODEV;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics)
+ return -EINVAL;
+ state = &ics->irq_state[src];
+
+ write_xive(xics, ics, state, state->server, MASKED, state->priority);
+
+ return 0;
+}
+
+/* -- ICP routines, including hcalls -- */
+
+static inline bool icp_try_update(struct kvmppc_icp *icp,
+ union kvmppc_icp_state old,
+ union kvmppc_icp_state new,
+ bool change_self)
+{
+ bool success;
+
+ /* Calculate new output value */
+ new.out_ee = (new.xisr && (new.pending_pri < new.cppr));
+
+ /* Attempt atomic update */
+ success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
+ if (!success)
+ goto bail;
+
+ XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
+ icp->server_num,
+ old.cppr, old.mfrr, old.pending_pri, old.xisr,
+ old.need_resend, old.out_ee);
+ XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
+ new.cppr, new.mfrr, new.pending_pri, new.xisr,
+ new.need_resend, new.out_ee);
+ /*
+ * Check for output state update
+ *
+ * Note that this is racy since another processor could be updating
+ * the state already. This is why we never clear the interrupt output
+ * here, we only ever set it. The clear only happens prior to doing
+ * an update and only by the processor itself. Currently we do it
+ * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
+ *
+ * We also do not try to figure out whether the EE state has changed,
+ * we unconditionally set it if the new state calls for it. The reason
+ * for that is that we opportunistically remove the pending interrupt
+ * flag when raising CPPR, so we need to set it back here if an
+ * interrupt is still pending.
+ */
+ if (new.out_ee) {
+ kvmppc_book3s_queue_irqprio(icp->vcpu,
+ BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+ if (!change_self)
+ kvmppc_fast_vcpu_kick(icp->vcpu);
+ }
+ bail:
+ return success;
+}
+
+static void icp_check_resend(struct kvmppc_xics *xics,
+ struct kvmppc_icp *icp)
+{
+ u32 icsid;
+
+ /* Order this load with the test for need_resend in the caller */
+ smp_rmb();
+ for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
+ struct kvmppc_ics *ics = xics->ics[icsid];
+
+ if (!test_and_clear_bit(icsid, icp->resend_map))
+ continue;
+ if (!ics)
+ continue;
+ ics_check_resend(xics, ics, icp);
+ }
+}
+
+static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
+ u32 *reject)
+{
+ union kvmppc_icp_state old_state, new_state;
+ bool success;
+
+ XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
+ icp->server_num);
+
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ *reject = 0;
+
+ /* See if we can deliver */
+ success = new_state.cppr > priority &&
+ new_state.mfrr > priority &&
+ new_state.pending_pri > priority;
+
+ /*
+ * If we can, check for a rejection and perform the
+ * delivery
+ */
+ if (success) {
+ *reject = new_state.xisr;
+ new_state.xisr = irq;
+ new_state.pending_pri = priority;
+ } else {
+ /*
+ * If we failed to deliver we set need_resend
+ * so a subsequent CPPR state change causes us
+ * to try a new delivery.
+ */
+ new_state.need_resend = true;
+ }
+
+ } while (!icp_try_update(icp, old_state, new_state, false));
+
+ return success;
+}
+
+static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ u32 new_irq)
+{
+ struct ics_irq_state *state;
+ struct kvmppc_ics *ics;
+ u32 reject;
+ u16 src;
+
+ /*
+ * This is used both for initial delivery of an interrupt and
+ * for subsequent rejection.
+ *
+ * Rejection can be racy vs. resends. We have evaluated the
+ * rejection in an atomic ICP transaction which is now complete,
+ * so potentially the ICP can already accept the interrupt again.
+ *
+ * So we need to retry the delivery. Essentially the reject path
+ * boils down to a failed delivery. Always.
+ *
+ * Now the interrupt could also have moved to a different target,
+ * thus we may need to re-do the ICP lookup as well
+ */
+
+ again:
+ /* Get the ICS state and lock it */
+ ics = kvmppc_xics_find_ics(xics, new_irq, &src);
+ if (!ics) {
+ XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
+ return;
+ }
+ state = &ics->irq_state[src];
+
+ /* Get a lock on the ICS */
+ mutex_lock(&ics->lock);
+
+ /* Get our server */
+ if (!icp || state->server != icp->server_num) {
+ icp = kvmppc_xics_find_server(xics->kvm, state->server);
+ if (!icp) {
+ pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
+ new_irq, state->server);
+ goto out;
+ }
+ }
+
+ /* Clear the resend bit of that interrupt */
+ state->resend = 0;
+
+ /*
+ * If masked, bail out
+ *
+ * Note: PAPR doesn't mention anything about masked pending
+ * when doing a resend, only when doing a delivery.
+ *
+ * However that would have the effect of losing a masked
+ * interrupt that was rejected and isn't consistent with
+ * the whole masked_pending business which is about not
+ * losing interrupts that occur while masked.
+ *
+ * I don't differentiate between normal deliveries and resends; this
+ * implementation will differ from PAPR and not lose such
+ * interrupts.
+ */
+ if (state->priority == MASKED) {
+ XICS_DBG("irq %#x masked pending\n", new_irq);
+ state->masked_pending = 1;
+ goto out;
+ }
+
+ /*
+ * Try the delivery, this will set the need_resend flag
+ * in the ICP as part of the atomic transaction if the
+ * delivery is not possible.
+ *
+ * Note that if successful, the new delivery might have itself
+ * rejected an interrupt that was "delivered" before we took the
+ * icp mutex.
+ *
+ * In this case we do the whole sequence all over again for the
+ * new guy. We cannot assume that the rejected interrupt is less
+ * favored than the new one, and thus doesn't need to be delivered,
+ * because by the time we exit icp_try_to_deliver() the target
+ * processor may well have already consumed & completed it, and thus
+ * the rejected interrupt might actually be already acceptable.
+ */
+ if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
+ /*
+ * Delivery was successful, did we reject somebody else ?
+ */
+ if (reject && reject != XICS_IPI) {
+ mutex_unlock(&ics->lock);
+ new_irq = reject;
+ goto again;
+ }
+ } else {
+ /*
+ * We failed to deliver the interrupt, so we need to set the
+ * resend map bit and mark the ICS state as needing a resend
+ */
+ set_bit(ics->icsid, icp->resend_map);
+ state->resend = 1;
+
+ /*
+ * If the need_resend flag got cleared in the ICP some time
+ * between icp_try_to_deliver() atomic update and now, then
+ * we know it might have missed the resend_map bit. So we
+ * retry
+ */
+ smp_mb();
+ if (!icp->state.need_resend) {
+ mutex_unlock(&ics->lock);
+ goto again;
+ }
+ }
+ out:
+ mutex_unlock(&ics->lock);
+}
+
+static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ u8 new_cppr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ bool resend;
+
+ /*
+ * This handles several related states in one operation:
+ *
+ * ICP State: Down_CPPR
+ *
+ * Load CPPR with new value and if the XISR is 0
+ * then check for resends:
+ *
+ * ICP State: Resend
+ *
+ * If MFRR is more favored than CPPR, check for IPIs
+ * and notify ICS of a potential resend. This is done
+ * asynchronously (when used in real mode, we will have
+ * to exit here).
+ *
+ * We do not handle the complete Check_IPI as documented
+ * here. In the PAPR, this state will be used for both
+ * Set_MFRR and Down_CPPR. However, we know that we aren't
+ * changing the MFRR state here so we don't need to handle
+ * the case of an MFRR causing a reject of a pending irq,
+ * this will have been handled when the MFRR was set in the
+ * first place.
+ *
+ * Thus we don't have to handle rejects, only resends.
+ *
+ * When implementing real mode for HV KVM, resend will lead to
+ * a H_TOO_HARD return and the whole transaction will be handled
+ * in virtual mode.
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ /* Down_CPPR */
+ new_state.cppr = new_cppr;
+
+ /*
+ * Cut down Resend / Check_IPI / IPI
+ *
+ * The logic is that we cannot have a pending interrupt
+ * trumped by an IPI at this point (see above), so we
+ * know that either the pending interrupt is already an
+ * IPI (in which case we don't care to override it) or
+ * it's either more favored than us or non-existent
+ */
+ if (new_state.mfrr < new_cppr &&
+ new_state.mfrr <= new_state.pending_pri) {
+ WARN_ON(new_state.xisr != XICS_IPI &&
+ new_state.xisr != 0);
+ new_state.pending_pri = new_state.mfrr;
+ new_state.xisr = XICS_IPI;
+ }
+
+ /* Latch/clear resend bit */
+ resend = new_state.need_resend;
+ new_state.need_resend = 0;
+
+ } while (!icp_try_update(icp, old_state, new_state, true));
+
+ /*
+ * Now handle resend checks. Those are asynchronous to the ICP
+ * state update in HW (ie bus transactions) so we can handle them
+ * separately here too
+ */
+ if (resend)
+ icp_check_resend(xics, icp);
+}
+
+static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ u32 xirr;
+
+ /* First, remove EE from the processor */
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu,
+ BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+
+ /*
+ * ICP State: Accept_Interrupt
+ *
+ * Return the pending interrupt (if any) along with the
+ * current CPPR, then clear the XISR & set CPPR to the
+ * pending priority
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
+ if (!old_state.xisr)
+ break;
+ new_state.cppr = new_state.pending_pri;
+ new_state.pending_pri = 0xff;
+ new_state.xisr = 0;
+
+ } while (!icp_try_update(icp, old_state, new_state, true));
+
+ XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);
+
+ return xirr;
+}
+
+static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp;
+ u32 reject;
+ bool resend;
+ bool local;
+
+ XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
+ vcpu->vcpu_id, server, mfrr);
+
+ icp = vcpu->arch.icp;
+ local = icp->server_num == server;
+ if (!local) {
+ icp = kvmppc_xics_find_server(vcpu->kvm, server);
+ if (!icp)
+ return H_PARAMETER;
+ }
+
+ /*
+ * ICP state: Set_MFRR
+ *
+ * If the CPPR is more favored than the new MFRR, then
+ * nothing needs to be rejected as there can be no XISR to
+ * reject. If the MFRR is being made less favored then
+ * there might be a previously-rejected interrupt needing
+ * to be resent.
+ *
+ * If the CPPR is less favored, then we might be replacing
+ * an interrupt, and thus need to possibly reject it as in
+ *
+ * ICP state: Check_IPI
+ */
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ /* Set_MFRR */
+ new_state.mfrr = mfrr;
+
+ /* Check_IPI */
+ reject = 0;
+ resend = false;
+ if (mfrr < new_state.cppr) {
+ /* Reject a pending interrupt if not an IPI */
+ if (mfrr <= new_state.pending_pri)
+ reject = new_state.xisr;
+ new_state.pending_pri = mfrr;
+ new_state.xisr = XICS_IPI;
+ }
+
+ if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
+ resend = new_state.need_resend;
+ new_state.need_resend = 0;
+ }
+ } while (!icp_try_update(icp, old_state, new_state, local));
+
+ /* Handle reject */
+ if (reject && reject != XICS_IPI)
+ icp_deliver_irq(xics, icp, reject);
+
+ /* Handle resend */
+ if (resend)
+ icp_check_resend(xics, icp);
+
+ return H_SUCCESS;
+}
+
+static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+ union kvmppc_icp_state state;
+ struct kvmppc_icp *icp;
+
+ icp = vcpu->arch.icp;
+ if (icp->server_num != server) {
+ icp = kvmppc_xics_find_server(vcpu->kvm, server);
+ if (!icp)
+ return H_PARAMETER;
+ }
+ state = ACCESS_ONCE(icp->state);
+ kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
+ kvmppc_set_gpr(vcpu, 5, state.mfrr);
+ return H_SUCCESS;
+}
+
+static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ u32 reject;
+
+ XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);
+
+ /*
+ * ICP State: Set_CPPR
+ *
+ * We can safely compare the new value with the current
+ * value outside of the transaction as the CPPR is only
+ * ever changed by the processor on itself
+ */
+ if (cppr > icp->state.cppr)
+ icp_down_cppr(xics, icp, cppr);
+ else if (cppr == icp->state.cppr)
+ return;
+
+ /*
+ * ICP State: Up_CPPR
+ *
+ * The processor is raising its priority, this can result
+ * in a rejection of a pending interrupt:
+ *
+ * ICP State: Reject_Current
+ *
+ * We can remove EE from the current processor, the update
+ * transaction will set it again if needed
+ */
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu,
+ BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+
+ do {
+ old_state = new_state = ACCESS_ONCE(icp->state);
+
+ reject = 0;
+ new_state.cppr = cppr;
+
+ if (cppr <= new_state.pending_pri) {
+ reject = new_state.xisr;
+ new_state.xisr = 0;
+ new_state.pending_pri = 0xff;
+ }
+
+ } while (!icp_try_update(icp, old_state, new_state, true));
+
+ /*
+ * Check for rejects. They are handled by doing a new delivery
+ * attempt (see comments in icp_deliver_irq).
+ */
+ if (reject && reject != XICS_IPI)
+ icp_deliver_irq(xics, icp, reject);
+}
+
+static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *state;
+ u32 irq = xirr & 0x00ffffff;
+ u16 src;
+
+ XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);
+
+ /*
+ * ICP State: EOI
+ *
+ * Note: If EOI is incorrectly used by SW to lower the CPPR
+ * value (ie more favored), we do not check for rejection of
+ * a pending interrupt, this is a SW error and PAPR specifies
+ * that we don't have to deal with it.
+ *
+ * The sending of an EOI to the ICS is handled after the
+ * CPPR update
+ *
+ * ICP State: Down_CPPR which we handle
+ * in a separate function as it's shared with H_CPPR.
+ */
+ icp_down_cppr(xics, icp, xirr >> 24);
+
+ /* IPIs have no EOI */
+ if (irq == XICS_IPI)
+ return H_SUCCESS;
+ /*
+ * EOI handling: If the interrupt is still asserted, we need to
+ * resend it. We can take a lockless "peek" at the ICS state here.
+ *
+ * "Message" interrupts will never have "asserted" set
+ */
+ ics = kvmppc_xics_find_ics(xics, irq, &src);
+ if (!ics) {
+ XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
+ return H_PARAMETER;
+ }
+ state = &ics->irq_state[src];
+
+ /* Still asserted, resend it */
+ if (state->asserted)
+ icp_deliver_irq(xics, icp, irq);
+
+ return H_SUCCESS;
+}
+
+static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
+{
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+
+ XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
+ hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);
+
+ if (icp->rm_action & XICS_RM_KICK_VCPU)
+ kvmppc_fast_vcpu_kick(icp->rm_kick_target);
+ if (icp->rm_action & XICS_RM_CHECK_RESEND)
+ icp_check_resend(xics, icp);
+ if (icp->rm_action & XICS_RM_REJECT)
+ icp_deliver_irq(xics, icp, icp->rm_reject);
+
+ icp->rm_action = 0;
+
+ return H_SUCCESS;
+}
+
+int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+{
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ unsigned long res;
+ int rc = H_SUCCESS;
+
+ /* Check if we have an ICP */
+ if (!xics || !vcpu->arch.icp)
+ return H_HARDWARE;
+
+ /* These requests don't have real-mode implementations at present */
+ switch (req) {
+ case H_XIRR_X:
+ res = kvmppc_h_xirr(vcpu);
+ kvmppc_set_gpr(vcpu, 4, res);
+ kvmppc_set_gpr(vcpu, 5, get_tb());
+ return rc;
+ case H_IPOLL:
+ rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+ return rc;
+ }
+
+ /* Check for real mode returning too hard */
+ if (xics->real_mode)
+ return kvmppc_xics_rm_complete(vcpu, req);
+
+ switch (req) {
+ case H_XIRR:
+ res = kvmppc_h_xirr(vcpu);
+ kvmppc_set_gpr(vcpu, 4, res);
+ break;
+ case H_CPPR:
+ kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
+ break;
+ case H_EOI:
+ rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
+ break;
+ case H_IPI:
+ rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ break;
+ }
+
+ return rc;
+}
+
+
+/* -- Initialisation code etc. -- */
+
+static int xics_debug_show(struct seq_file *m, void *private)
+{
+ struct kvmppc_xics *xics = m->private;
+ struct kvm *kvm = xics->kvm;
+ struct kvm_vcpu *vcpu;
+ int icsid, i;
+
+ if (!kvm)
+ return 0;
+
+ seq_printf(m, "=========\nICP state\n=========\n");
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ union kvmppc_icp_state state;
+
+ if (!icp)
+ continue;
+
+ state.raw = ACCESS_ONCE(icp->state.raw);
+ seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
+ icp->server_num, state.xisr,
+ state.pending_pri, state.cppr, state.mfrr,
+ state.out_ee, state.need_resend);
+ }
+
+ for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
+ struct kvmppc_ics *ics = xics->ics[icsid];
+
+ if (!ics)
+ continue;
+
+ seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
+ icsid);
+
+ mutex_lock(&ics->lock);
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ struct ics_irq_state *irq = &ics->irq_state[i];
+
+ seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
+ irq->number, irq->server, irq->priority,
+ irq->saved_priority, irq->asserted,
+ irq->resend, irq->masked_pending);
+
+ }
+ mutex_unlock(&ics->lock);
+ }
+ return 0;
+}
+
+static int xics_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xics_debug_show, inode->i_private);
+}
+
+static const struct file_operations xics_debug_fops = {
+ .open = xics_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xics_debugfs_init(struct kvmppc_xics *xics)
+{
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
+ if (!name) {
+ pr_err("%s: no memory for name\n", __func__);
+ return;
+ }
+
+ xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
+ xics, &xics_debug_fops);
+
+ pr_debug("%s: created %s\n", __func__, name);
+ kfree(name);
+}
+
+static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
+ struct kvmppc_xics *xics, int irq)
+{
+ struct kvmppc_ics *ics;
+ int i, icsid;
+
+ icsid = irq >> KVMPPC_XICS_ICS_SHIFT;
+
+ mutex_lock(&kvm->lock);
+
+ /* ICS already exists - somebody else got here first */
+ if (xics->ics[icsid])
+ goto out;
+
+ /* Create the ICS */
+ ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
+ if (!ics)
+ goto out;
+
+ mutex_init(&ics->lock);
+ ics->icsid = icsid;
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
+ ics->irq_state[i].priority = MASKED;
+ ics->irq_state[i].saved_priority = MASKED;
+ }
+ smp_wmb();
+ xics->ics[icsid] = ics;
+
+ if (icsid > xics->max_icsid)
+ xics->max_icsid = icsid;
+
+ out:
+ mutex_unlock(&kvm->lock);
+ return xics->ics[icsid];
+}
+
+int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
+{
+ struct kvmppc_icp *icp;
+
+ if (!vcpu->kvm->arch.xics)
+ return -ENODEV;
+
+ if (kvmppc_xics_find_server(vcpu->kvm, server_num))
+ return -EEXIST;
+
+ icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
+ if (!icp)
+ return -ENOMEM;
+
+ icp->vcpu = vcpu;
+ icp->server_num = server_num;
+ icp->state.mfrr = MASKED;
+ icp->state.pending_pri = MASKED;
+ vcpu->arch.icp = icp;
+
+ XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);
+
+ return 0;
+}
+
+u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ union kvmppc_icp_state state;
+
+ if (!icp)
+ return 0;
+ state = icp->state;
+ return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
+ ((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
+ ((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
+ ((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
+}
+
+int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
+{
+ struct kvmppc_icp *icp = vcpu->arch.icp;
+ struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+ union kvmppc_icp_state old_state, new_state;
+ struct kvmppc_ics *ics;
+ u8 cppr, mfrr, pending_pri;
+ u32 xisr;
+ u16 src;
+ bool resend;
+
+ if (!icp || !xics)
+ return -ENOENT;
+
+ cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
+ xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
+ KVM_REG_PPC_ICP_XISR_MASK;
+ mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
+ pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;
+
+ /* Require the new state to be internally consistent */
+ if (xisr == 0) {
+ if (pending_pri != 0xff)
+ return -EINVAL;
+ } else if (xisr == XICS_IPI) {
+ if (pending_pri != mfrr || pending_pri >= cppr)
+ return -EINVAL;
+ } else {
+ if (pending_pri >= mfrr || pending_pri >= cppr)
+ return -EINVAL;
+ ics = kvmppc_xics_find_ics(xics, xisr, &src);
+ if (!ics)
+ return -EINVAL;
+ }
+
+ new_state.raw = 0;
+ new_state.cppr = cppr;
+ new_state.xisr = xisr;
+ new_state.mfrr = mfrr;
+ new_state.pending_pri = pending_pri;
+
+ /*
+ * Deassert the CPU interrupt request.
+ * icp_try_update will reassert it if necessary.
+ */
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu,
+ BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+
+ /*
+ * Note that if we displace an interrupt from old_state.xisr,
+ * we don't mark it as rejected. We expect userspace to set
+ * the state of the interrupt sources to be consistent with
+ * the ICP states (either before or afterwards, which doesn't
+ * matter). We do handle resends due to CPPR becoming less
+ * favoured because that is necessary to end up with a
+ * consistent state in the situation where userspace restores
+ * the ICS states before the ICP states.
+ */
+ do {
+ old_state = ACCESS_ONCE(icp->state);
+
+ if (new_state.mfrr <= old_state.mfrr) {
+ resend = false;
+ new_state.need_resend = old_state.need_resend;
+ } else {
+ resend = old_state.need_resend;
+ new_state.need_resend = 0;
+ }
+ } while (!icp_try_update(icp, old_state, new_state, false));
+
+ if (resend)
+ icp_check_resend(xics, icp);
+
+ return 0;
+}
+
+static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
+{
+ int ret;
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *irqp;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u16 idx;
+ u64 val, prio;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &idx);
+ if (!ics)
+ return -ENOENT;
+
+ irqp = &ics->irq_state[idx];
+ mutex_lock(&ics->lock);
+ ret = -ENOENT;
+ if (irqp->exists) {
+ val = irqp->server;
+ prio = irqp->priority;
+ if (prio == MASKED) {
+ val |= KVM_XICS_MASKED;
+ prio = irqp->saved_priority;
+ }
+ val |= prio << KVM_XICS_PRIORITY_SHIFT;
+ if (irqp->asserted)
+ val |= KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
+ else if (irqp->masked_pending || irqp->resend)
+ val |= KVM_XICS_PENDING;
+ ret = 0;
+ }
+ mutex_unlock(&ics->lock);
+
+ if (!ret && put_user(val, ubufp))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
+{
+ struct kvmppc_ics *ics;
+ struct ics_irq_state *irqp;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u16 idx;
+ u64 val;
+ u8 prio;
+ u32 server;
+
+ if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
+ return -ENOENT;
+
+ ics = kvmppc_xics_find_ics(xics, irq, &idx);
+ if (!ics) {
+ ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
+ if (!ics)
+ return -ENOMEM;
+ }
+ irqp = &ics->irq_state[idx];
+ if (get_user(val, ubufp))
+ return -EFAULT;
+
+ server = val & KVM_XICS_DESTINATION_MASK;
+ prio = val >> KVM_XICS_PRIORITY_SHIFT;
+ if (prio != MASKED &&
+ kvmppc_xics_find_server(xics->kvm, server) == NULL)
+ return -EINVAL;
+
+ mutex_lock(&ics->lock);
+ irqp->server = server;
+ irqp->saved_priority = prio;
+ if (val & KVM_XICS_MASKED)
+ prio = MASKED;
+ irqp->priority = prio;
+ irqp->resend = 0;
+ irqp->masked_pending = 0;
+ irqp->asserted = 0;
+ if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
+ irqp->asserted = 1;
+ irqp->exists = 1;
+ mutex_unlock(&ics->lock);
+
+ if (val & KVM_XICS_PENDING)
+ icp_deliver_irq(xics, NULL, irqp->number);
+
+ return 0;
+}
+
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status)
+{
+ struct kvmppc_xics *xics = kvm->arch.xics;
+
+ return ics_deliver_irq(xics, irq, level, line_status);
+}
+
+static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct kvmppc_xics *xics = dev->private;
+
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ return xics_set_source(xics, attr->attr, attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct kvmppc_xics *xics = dev->private;
+
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ return xics_get_source(xics, attr->attr, attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
+ attr->attr < KVMPPC_XICS_NR_IRQS)
+ return 0;
+ break;
+ }
+ return -ENXIO;
+}
+
+static void kvmppc_xics_free(struct kvm_device *dev)
+{
+ struct kvmppc_xics *xics = dev->private;
+ int i;
+ struct kvm *kvm = xics->kvm;
+
+ debugfs_remove(xics->dentry);
+
+ if (kvm)
+ kvm->arch.xics = NULL;
+
+ for (i = 0; i <= xics->max_icsid; i++)
+ kfree(xics->ics[i]);
+ kfree(xics);
+ kfree(dev);
+}
+
+static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
+{
+ struct kvmppc_xics *xics;
+ struct kvm *kvm = dev->kvm;
+ int ret = 0;
+
+ xics = kzalloc(sizeof(*xics), GFP_KERNEL);
+ if (!xics)
+ return -ENOMEM;
+
+ dev->private = xics;
+ xics->dev = dev;
+ xics->kvm = kvm;
+
+ /* Already there ? */
+ mutex_lock(&kvm->lock);
+ if (kvm->arch.xics)
+ ret = -EEXIST;
+ else
+ kvm->arch.xics = xics;
+ mutex_unlock(&kvm->lock);
+
+ if (ret)
+ return ret;
+
+ xics_debugfs_init(xics);
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ /* Enable real mode support */
+ xics->real_mode = ENABLE_REALMODE;
+ xics->real_mode_dbg = DEBUG_REALMODE;
+ }
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
+ return 0;
+}
+
+struct kvm_device_ops kvm_xics_ops = {
+ .name = "kvm-xics",
+ .create = kvmppc_xics_create,
+ .destroy = kvmppc_xics_free,
+ .set_attr = xics_set_attr,
+ .get_attr = xics_get_attr,
+ .has_attr = xics_has_attr,
+};
+
+int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
+ u32 xcpu)
+{
+ struct kvmppc_xics *xics = dev->private;
+ int r = -EBUSY;
+
+ if (dev->ops != &kvm_xics_ops)
+ return -EPERM;
+ if (xics->kvm != vcpu->kvm)
+ return -EPERM;
+ if (vcpu->arch.irq_type)
+ return -EBUSY;
+
+ r = kvmppc_xics_create_icp(vcpu, xcpu);
+ if (!r)
+ vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
+
+ return r;
+}
+
+void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->arch.icp)
+ return;
+ kfree(vcpu->arch.icp);
+ vcpu->arch.icp = NULL;
+ vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
+}
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
new file mode 100644
index 0000000..dd9326c
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2012 Michael Ellerman, IBM Corporation.
+ * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KVM_PPC_BOOK3S_XICS_H
+#define _KVM_PPC_BOOK3S_XICS_H
+
+/*
+ * We use a two-level tree to store interrupt source information.
+ * There are up to 1024 ICS nodes, each of which can represent
+ * 1024 sources.
+ */
+#define KVMPPC_XICS_MAX_ICS_ID 1023
+#define KVMPPC_XICS_ICS_SHIFT 10
+#define KVMPPC_XICS_IRQ_PER_ICS (1 << KVMPPC_XICS_ICS_SHIFT)
+#define KVMPPC_XICS_SRC_MASK (KVMPPC_XICS_IRQ_PER_ICS - 1)
+
+/*
+ * Interrupt source numbers below this are reserved, for example
+ * 0 is "no interrupt", and 2 is used for IPIs.
+ */
+#define KVMPPC_XICS_FIRST_IRQ 16
+#define KVMPPC_XICS_NR_IRQS ((KVMPPC_XICS_MAX_ICS_ID + 1) * \
+ KVMPPC_XICS_IRQ_PER_ICS)
+
+/* Priority value to use for disabling an interrupt */
+#define MASKED 0xff
+
+/* State for one irq source */
+struct ics_irq_state {
+ u32 number;
+ u32 server;
+ u8 priority;
+ u8 saved_priority;
+ u8 resend;
+ u8 masked_pending;
+ u8 asserted; /* Only for LSI */
+ u8 exists;
+};
+
+/* Atomic ICP state, updated with a single compare & swap */
+union kvmppc_icp_state {
+ unsigned long raw;
+ struct {
+ u8 out_ee:1;
+ u8 need_resend:1;
+ u8 cppr;
+ u8 mfrr;
+ u8 pending_pri;
+ u32 xisr;
+ };
+};
+
+/* One bit per ICS */
+#define ICP_RESEND_MAP_SIZE (KVMPPC_XICS_MAX_ICS_ID / BITS_PER_LONG + 1)
+
+struct kvmppc_icp {
+ struct kvm_vcpu *vcpu;
+ unsigned long server_num;
+ union kvmppc_icp_state state;
+ unsigned long resend_map[ICP_RESEND_MAP_SIZE];
+
+ /* Real mode might find something too hard, here's the action
+ * it might request from virtual mode
+ */
+#define XICS_RM_KICK_VCPU 0x1
+#define XICS_RM_CHECK_RESEND 0x2
+#define XICS_RM_REJECT 0x4
+ u32 rm_action;
+ struct kvm_vcpu *rm_kick_target;
+ u32 rm_reject;
+
+ /* Debug stuff for real mode */
+ union kvmppc_icp_state rm_dbgstate;
+ struct kvm_vcpu *rm_dbgtgt;
+};
+
+struct kvmppc_ics {
+ struct mutex lock;
+ u16 icsid;
+ struct ics_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
+};
+
+struct kvmppc_xics {
+ struct kvm *kvm;
+ struct kvm_device *dev;
+ struct dentry *dentry;
+ u32 max_icsid;
+ bool real_mode;
+ bool real_mode_dbg;
+ struct kvmppc_ics *ics[KVMPPC_XICS_MAX_ICS_ID + 1];
+};
+
+static inline struct kvmppc_icp *kvmppc_xics_find_server(struct kvm *kvm,
+ u32 nr)
+{
+ struct kvm_vcpu *vcpu = NULL;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.icp && nr == vcpu->arch.icp->server_num)
+ return vcpu->arch.icp;
+ }
+ return NULL;
+}
+
+static inline struct kvmppc_ics *kvmppc_xics_find_ics(struct kvmppc_xics *xics,
+ u32 irq, u16 *source)
+{
+ u32 icsid = irq >> KVMPPC_XICS_ICS_SHIFT;
+ u16 src = irq & KVMPPC_XICS_SRC_MASK;
+ struct kvmppc_ics *ics;
+
+ if (source)
+ *source = src;
+ if (icsid > KVMPPC_XICS_MAX_ICS_ID)
+ return NULL;
+ ics = xics->ics[icsid];
+ if (!ics)
+ return NULL;
+ return ics;
+}
+
+
+#endif /* _KVM_PPC_BOOK3S_XICS_H */
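
The KVMPPC_XICS_* constants in this header fix how a global interrupt number splits into an ICS index and a per-ICS source, which is exactly the shift-and-mask that kvmppc_xics_find_ics() performs. A minimal standalone sketch of that decomposition, reusing the same shift and mask values (the example irq number is arbitrary and not part of the patch):

#include <stdio.h>

#define ICS_SHIFT 10                            /* KVMPPC_XICS_ICS_SHIFT */
#define SRC_MASK  ((1u << ICS_SHIFT) - 1)       /* KVMPPC_XICS_SRC_MASK */

int main(void)
{
        unsigned int irq   = 0x1234;            /* arbitrary example number */
        unsigned int icsid = irq >> ICS_SHIFT;  /* which ICS node */
        unsigned int src   = irq & SRC_MASK;    /* source within that node */

        printf("irq %#x -> icsid %u, src %#x\n", irq, icsid, src);
        return 0;
}

For irq 0x1234 this prints icsid 4 and src 0x234; kvmppc_xics_find_ics() then indexes xics->ics[icsid] and reports src back through its *source parameter.
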
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 0aa7d24..d2579d8 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -743,7 +743,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
ret = s;
goto out;
}
- kvmppc_fix_ee_before_entry();
kvm_guest_enter();
@@ -796,6 +795,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
current->thread.dbcr0 = 0;
kvmppc_booke_vcpu_load_debug_regs(vcpu);
+ kvmppc_fix_ee_before_entry();
+
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
/* No need for kvm_guest_exit. It's done in handle_exit.
@@ -1771,7 +1772,7 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old)
+ const struct kvm_memory_slot *old)
{
}
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index eab3914..10f8b20 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -109,6 +109,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
+
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -149,8 +151,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
- if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+ if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
+ __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
kvmppc_e500_tlbil_all(vcpu_e500);
+ __get_cpu_var(last_vcpu_on_cpu) = vcpu;
+ }
kvmppc_load_guest_fp(vcpu);
kvmppc_load_guest_altivec(vcpu);
diff --git a/arch/powerpc/kvm/irq.h b/arch/powerpc/kvm/irq.h
index f1e27fd..5a9a10b 100644
--- a/arch/powerpc/kvm/irq.h
+++ b/arch/powerpc/kvm/irq.h
@@ -10,6 +10,9 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
#ifdef CONFIG_KVM_MPIC
ret = ret || (kvm->arch.mpic != NULL);
#endif
+#ifdef CONFIG_KVM_XICS
+ ret = ret || (kvm->arch.xics != NULL);
+#endif
smp_rmb();
return ret;
}
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 46b0a84..2861ae9 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -1788,7 +1788,8 @@ void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
* > 0 Number of CPUs interrupt was delivered to
*/
static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level)
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
{
u32 irq = e->irqchip.pin;
struct openpic *opp = kvm->arch.mpic;
@@ -1803,7 +1804,7 @@ static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
}
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level)
+ struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
struct openpic *opp = kvm->arch.mpic;
unsigned long flags;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 40071a4..2f7a221 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -336,6 +336,10 @@ int kvm_dev_ioctl_check_extension(long ext)
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_PPC_ALLOC_HTAB:
+ case KVM_CAP_PPC_RTAS:
+#ifdef CONFIG_KVM_XICS
+ case KVM_CAP_IRQ_XICS:
+#endif
r = 1;
break;
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -412,18 +416,17 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- struct kvm_memory_slot old,
- struct kvm_userspace_memory_region *mem,
- int user_alloc)
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
{
return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old,
- int user_alloc)
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ enum kvm_mr_change change)
{
kvmppc_core_commit_memory_region(kvm, mem, old);
}
@@ -466,6 +469,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
case KVMPPC_IRQ_MPIC:
kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
break;
+ case KVMPPC_IRQ_XICS:
+ kvmppc_xics_free_icp(vcpu);
+ break;
}
kvmppc_core_vcpu_free(vcpu);
@@ -829,6 +835,25 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
break;
}
#endif
+#ifdef CONFIG_KVM_XICS
+ case KVM_CAP_IRQ_XICS: {
+ struct file *filp;
+ struct kvm_device *dev;
+
+ r = -EBADF;
+ filp = fget(cap->args[0]);
+ if (!filp)
+ break;
+
+ r = -EPERM;
+ dev = kvm_device_from_filp(filp);
+ if (dev)
+ r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+
+ fput(filp);
+ break;
+ }
+#endif /* CONFIG_KVM_XICS */
default:
r = -EINVAL;
break;
@@ -951,13 +976,15 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
return 0;
}
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
+ bool line_status)
{
if (!irqchip_in_kernel(kvm))
return -ENXIO;
irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
- irq_event->irq, irq_event->level);
+ irq_event->irq, irq_event->level,
+ line_status);
return 0;
}
@@ -995,6 +1022,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
#ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_ALLOCATE_RMA: {
struct kvm_allocate_rma rma;
+ struct kvm *kvm = filp->private_data;
r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
@@ -1039,6 +1067,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
break;
}
+ case KVM_PPC_RTAS_DEFINE_TOKEN: {
+ struct kvm *kvm = filp->private_data;
+
+ r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
+ break;
+ }
#endif /* CONFIG_PPC_BOOK3S_64 */
default:
r = -ENOTTY;
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index fa48a4c..4504332 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -19,9 +19,7 @@ obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
checksum_wrappers_64.o hweight_64.o \
copyuser_power7.o string_64.o copypage_power7.o \
memcpy_power7.o
-obj-$(CONFIG_XMON) += sstep.o ldstfp.o
-obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o
-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o
+obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o
ifeq ($(CONFIG_PPC64),y)
obj-$(CONFIG_SMP) += locks.o
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index 0ef75bf..395c594 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -28,13 +28,14 @@ _GLOBAL(copypage_power7)
* aligned we don't need to clear the bottom 7 bits of either
* address.
*/
- ori r9,r3,1 /* stream=1 */
+ ori r9,r3,1 /* stream=1 => to */
#ifdef CONFIG_PPC_64K_PAGES
- lis r7,0x0E01 /* depth=7, units=512 */
+ lis r7,0x0E01 /* depth=7
+ * units/cachelines=512 */
#else
lis r7,0x0E00 /* depth=7 */
- ori r7,r7,0x1000 /* units=32 */
+ ori r7,r7,0x1000 /* units/cachelines=32 */
#endif
ori r10,r7,1 /* stream=1 */
@@ -43,12 +44,14 @@ _GLOBAL(copypage_power7)
.machine push
.machine "power4"
- dcbt r0,r4,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
+ /* setup read stream 0 */
+ dcbt r0,r4,0b01000 /* addr from */
+ dcbt r0,r7,0b01010 /* length and depth from */
+ /* setup write stream 1 */
+ dcbtst r0,r9,0b01000 /* addr to */
+ dcbtst r0,r10,0b01010 /* length and depth to */
eieio
- dcbt r0,r8,0b01010 /* GO */
+ dcbt r0,r8,0b01010 /* all streams GO */
.machine pop
#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 0d24ff1..d1f1179 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -318,12 +318,14 @@ err1; stb r0,0(r3)
.machine push
.machine "power4"
- dcbt r0,r6,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
+ /* setup read stream 0 */
+ dcbt r0,r6,0b01000 /* addr from */
+ dcbt r0,r7,0b01010 /* length and depth from */
+ /* setup write stream 1 */
+ dcbtst r0,r9,0b01000 /* addr to */
+ dcbtst r0,r10,0b01010 /* length and depth to */
eieio
- dcbt r0,r8,0b01010 /* GO */
+ dcbt r0,r8,0b01010 /* all streams GO */
.machine pop
beq cr1,.Lunwind_stack_nonvmx_copy
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 3a8489a..8726779 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -32,6 +32,7 @@
#include <linux/perf_event.h>
#include <linux/magic.h>
#include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
#include <asm/firmware.h>
#include <asm/page.h>
@@ -196,6 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
+ enum ctx_state prev_state = exception_enter();
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -204,6 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
int trap = TRAP(regs);
int is_exec = trap == 0x400;
int fault;
+ int rc = 0;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/*
@@ -230,28 +233,30 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
* look at it
*/
if (error_code & ICSWX_DSI_UCT) {
- int rc = acop_handle_fault(regs, address, error_code);
+ rc = acop_handle_fault(regs, address, error_code);
if (rc)
- return rc;
+ goto bail;
}
#endif /* CONFIG_PPC_ICSWX */
if (notify_page_fault(regs))
- return 0;
+ goto bail;
if (unlikely(debugger_fault_handler(regs)))
- return 0;
+ goto bail;
/* On a kernel SLB miss we can only check for a valid exception entry */
- if (!user_mode(regs) && (address >= TASK_SIZE))
- return SIGSEGV;
+ if (!user_mode(regs) && (address >= TASK_SIZE)) {
+ rc = SIGSEGV;
+ goto bail;
+ }
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
defined(CONFIG_PPC_BOOK3S_64))
if (error_code & DSISR_DABRMATCH) {
- /* DABR match */
- do_dabr(regs, address, error_code);
- return 0;
+ /* breakpoint match */
+ do_break(regs, address, error_code);
+ goto bail;
}
#endif
@@ -260,8 +265,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
local_irq_enable();
if (in_atomic() || mm == NULL) {
- if (!user_mode(regs))
- return SIGSEGV;
+ if (!user_mode(regs)) {
+ rc = SIGSEGV;
+ goto bail;
+ }
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with "
@@ -417,9 +424,11 @@ good_area:
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
- int rc = mm_fault_error(regs, address, fault);
+ rc = mm_fault_error(regs, address, fault);
if (rc >= MM_FAULT_RETURN)
- return rc;
+ goto bail;
+ else
+ rc = 0;
}
/*
@@ -454,7 +463,7 @@ good_area:
}
up_read(&mm->mmap_sem);
- return 0;
+ goto bail;
bad_area:
up_read(&mm->mmap_sem);
@@ -463,7 +472,7 @@ bad_area_nosemaphore:
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
_exception(SIGSEGV, regs, code, address);
- return 0;
+ goto bail;
}
if (is_exec && (error_code & DSISR_PROTFAULT))
@@ -471,7 +480,11 @@ bad_area_nosemaphore:
" page (%lx) - exploit attempt? (uid: %d)\n",
address, from_kuid(&init_user_ns, current_uid()));
- return SIGSEGV;
+ rc = SIGSEGV;
+
+bail:
+ exception_exit(prev_state);
+ return rc;
}
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index d7efdbf..4b921af 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -68,7 +68,11 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
next = pmd_addr_end(addr, end);
if (pmd_none(pmd))
return 0;
- if (is_hugepd(pmdp)) {
+ if (pmd_huge(pmd)) {
+ if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
+ write, pages, nr))
+ return 0;
+ } else if (is_hugepd(pmdp)) {
if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
addr, next, write, pages, nr))
return 0;
@@ -92,7 +96,11 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
- if (is_hugepd(pudp)) {
+ if (pud_huge(pud)) {
+ if (!gup_hugepte((pte_t *)pudp, PUD_SIZE, addr, next,
+ write, pages, nr))
+ return 0;
+ } else if (is_hugepd(pudp)) {
if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
addr, next, write, pages, nr))
return 0;
@@ -153,7 +161,11 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
goto slow;
- if (is_hugepd(pgdp)) {
+ if (pgd_huge(pgd)) {
+ if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
+ write, pages, &nr))
+ goto slow;
+ } else if (is_hugepd(pgdp)) {
if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
addr, next, write, pages, &nr))
goto slow;
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 7443481..0e980ac 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -196,7 +196,8 @@ htab_insert_pte:
mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_4K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert1)
bl . /* Patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -219,7 +220,8 @@ _GLOBAL(htab_call_hpte_insert1)
mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_4K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert2)
bl . /* Patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -490,7 +492,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
beq htab_inval_old_hpte
ld r6,STK_PARAM(R6)(r1)
- ori r26,r6,0x8000 /* Load the hidx mask */
+ ori r26,r6,PTE_PAGE_HIDX_OFFSET /* Load the hidx mask. */
ld r26,0(r26)
addi r5,r25,36 /* Check actual HPTE_SUB bit, this */
rldcr. r0,r31,r5,0 /* must match pgtable.h definition */
@@ -515,7 +517,8 @@ htab_special_pfn:
mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_4K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert1)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -542,7 +545,8 @@ _GLOBAL(htab_call_hpte_insert1)
mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_4K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert2)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -607,7 +611,7 @@ htab_pte_insert_ok:
sld r4,r4,r5
andc r26,r26,r4
or r26,r26,r3
- ori r5,r6,0x8000
+ ori r5,r6,PTE_PAGE_HIDX_OFFSET
std r26,0(r5)
lwsync
std r30,0(r6)
@@ -840,7 +844,8 @@ ht64_insert_pte:
mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_64K
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_64K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(ht64_call_hpte_insert1)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -863,7 +868,8 @@ _GLOBAL(ht64_call_hpte_insert1)
mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_64K
- ld r9,STK_PARAM(R9)(r1) /* segment size */
+ li r9,MMU_PAGE_64K /* actual page size */
+ ld r10,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(ht64_call_hpte_insert2)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index ffc1e00..4c122c3 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -39,7 +39,7 @@
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
-static inline void __tlbie(unsigned long vpn, int psize, int ssize)
+static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
unsigned long va;
unsigned int penc;
@@ -61,17 +61,31 @@ static inline void __tlbie(unsigned long vpn, int psize, int ssize)
switch (psize) {
case MMU_PAGE_4K:
+ /* clear out bits after (52) [0....52.....63] */
+ va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
+ va |= mmu_psize_defs[apsize].sllp << 6;
asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
default:
/* We need 14 to 14 + i bits of va */
- penc = mmu_psize_defs[psize].penc;
- va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
+ penc = mmu_psize_defs[psize].penc[apsize];
+ va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
+ /* Add AVAL part */
+ if (psize != apsize) {
+ /*
+			 * MPSS, 64K base page size and 16MB large page size
+			 * We don't need all the bits, but the rest of the bits
+			 * must be ignored by the processor.
+			 * vpn covers up to 65 bits of va (0..65) and we need
+			 * bits 58..64 of va.
+ */
+ va |= (vpn & 0xfe);
+ }
va |= 1; /* L */
asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -80,7 +94,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int ssize)
}
}
-static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
+static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
unsigned long va;
unsigned int penc;
@@ -96,16 +110,30 @@ static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
switch (psize) {
case MMU_PAGE_4K:
+		/* clear out bits after (52) [0....52.....63] */
+ va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
+ va |= mmu_psize_defs[apsize].sllp << 6;
asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
: : "r"(va) : "memory");
break;
default:
/* We need 14 to 14 + i bits of va */
- penc = mmu_psize_defs[psize].penc;
- va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
+ penc = mmu_psize_defs[psize].penc[apsize];
+ va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
+ /* Add AVAL part */
+ if (psize != apsize) {
+ /*
+			 * MPSS, 64K base page size and 16MB large page size
+			 * We don't need all the bits, but the rest of the bits
+			 * must be ignored by the processor.
+			 * vpn covers up to 65 bits of va (0..65) and we need
+			 * bits 58..64 of va.
+ */
+ va |= (vpn & 0xfe);
+ }
va |= 1; /* L */
asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
: : "r"(va) : "memory");
@@ -114,7 +142,8 @@ static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
}
-static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
+static inline void tlbie(unsigned long vpn, int psize, int apsize,
+ int ssize, int local)
{
unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -125,10 +154,10 @@ static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync": : :"memory");
if (use_local) {
- __tlbiel(vpn, psize, ssize);
+ __tlbiel(vpn, psize, apsize, ssize);
asm volatile("ptesync": : :"memory");
} else {
- __tlbie(vpn, psize, ssize);
+ __tlbie(vpn, psize, apsize, ssize);
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
@@ -156,7 +185,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
unsigned long pa, unsigned long rflags,
- unsigned long vflags, int psize, int ssize)
+ unsigned long vflags, int psize, int apsize, int ssize)
{
struct hash_pte *hptep = htab_address + hpte_group;
unsigned long hpte_v, hpte_r;
@@ -183,8 +212,8 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
if (i == HPTES_PER_GROUP)
return -1;
- hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize) | rflags;
+ hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED)) {
DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
@@ -244,6 +273,51 @@ static long native_hpte_remove(unsigned long hpte_group)
return i;
}
+static inline int __hpte_actual_psize(unsigned int lp, int psize)
+{
+ int i, shift;
+ unsigned int mask;
+
+ /* start from 1 ignoring MMU_PAGE_4K */
+ for (i = 1; i < MMU_PAGE_COUNT; i++) {
+
+ /* invalid penc */
+ if (mmu_psize_defs[psize].penc[i] == -1)
+ continue;
+ /*
+ * encoding bits per actual page size
+ * PTE LP actual page size
+ * rrrr rrrz >=8KB
+ * rrrr rrzz >=16KB
+ * rrrr rzzz >=32KB
+ * rrrr zzzz >=64KB
+ * .......
+ */
+ shift = mmu_psize_defs[i].shift - LP_SHIFT;
+ if (shift > LP_BITS)
+ shift = LP_BITS;
+ mask = (1 << shift) - 1;
+ if ((lp & mask) == mmu_psize_defs[psize].penc[i])
+ return i;
+ }
+ return -1;
+}
+
+static inline int hpte_actual_psize(struct hash_pte *hptep, int psize)
+{
+ /* Look at the 8 bit LP value */
+ unsigned int lp = (hptep->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+
+ if (!(hptep->v & HPTE_V_VALID))
+ return -1;
+
+ /* First check if it is large page */
+ if (!(hptep->v & HPTE_V_LARGE))
+ return MMU_PAGE_4K;
+
+ return __hpte_actual_psize(lp, psize);
+}
+
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long vpn, int psize, int ssize,
int local)
@@ -251,8 +325,9 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
struct hash_pte *hptep = htab_address + slot;
unsigned long hpte_v, want_v;
int ret = 0;
+ int actual_psize;
- want_v = hpte_encode_v(vpn, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
vpn, want_v & HPTE_V_AVPN, slot, newpp);
@@ -260,9 +335,20 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
native_lock_hpte(hptep);
hpte_v = hptep->v;
-
- /* Even if we miss, we need to invalidate the TLB */
- if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
+ actual_psize = hpte_actual_psize(hptep, psize);
+ /*
+ * We need to invalidate the TLB always because hpte_remove doesn't do
+ * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+ * random entry from it. When we do that we don't invalidate the TLB
+ * (hpte_remove) because we assume the old translation is still
+ * technically "valid".
+ */
+ if (actual_psize < 0) {
+ actual_psize = psize;
+ ret = -1;
+ goto err_out;
+ }
+ if (!HPTE_V_COMPARE(hpte_v, want_v)) {
DBG_LOW(" -> miss\n");
ret = -1;
} else {
@@ -271,10 +357,11 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
}
+err_out:
native_unlock_hpte(hptep);
/* Ensure it is out of the tlb too. */
- tlbie(vpn, psize, ssize, local);
+ tlbie(vpn, psize, actual_psize, ssize, local);
return ret;
}
@@ -288,7 +375,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
unsigned long want_v, hpte_v;
hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
- want_v = hpte_encode_v(vpn, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
/* Bolted mappings are only ever in the primary group */
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -315,6 +402,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
int psize, int ssize)
{
+ int actual_psize;
unsigned long vpn;
unsigned long vsid;
long slot;
@@ -327,13 +415,16 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
if (slot == -1)
panic("could not find page to bolt\n");
hptep = htab_address + slot;
+ actual_psize = hpte_actual_psize(hptep, psize);
+ if (actual_psize < 0)
+ actual_psize = psize;
/* Update the HPTE */
hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
(newpp & (HPTE_R_PP | HPTE_R_N));
/* Ensure it is out of the tlb too. */
- tlbie(vpn, psize, ssize, 0);
+ tlbie(vpn, psize, actual_psize, ssize, 0);
}
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
@@ -343,64 +434,66 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
unsigned long hpte_v;
unsigned long want_v;
unsigned long flags;
+ int actual_psize;
local_irq_save(flags);
DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
- want_v = hpte_encode_v(vpn, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = hptep->v;
- /* Even if we miss, we need to invalidate the TLB */
- if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
+ actual_psize = hpte_actual_psize(hptep, psize);
+ /*
+ * We need to invalidate the TLB always because hpte_remove doesn't do
+ * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+ * random entry from it. When we do that we don't invalidate the TLB
+ * (hpte_remove) because we assume the old translation is still
+ * technically "valid".
+ */
+ if (actual_psize < 0) {
+ actual_psize = psize;
+ native_unlock_hpte(hptep);
+ goto err_out;
+ }
+ if (!HPTE_V_COMPARE(hpte_v, want_v))
native_unlock_hpte(hptep);
else
/* Invalidate the hpte. NOTE: this also unlocks it */
hptep->v = 0;
+err_out:
/* Invalidate the TLB */
- tlbie(vpn, psize, ssize, local);
-
+ tlbie(vpn, psize, actual_psize, ssize, local);
local_irq_restore(flags);
}
-#define LP_SHIFT 12
-#define LP_BITS 8
-#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
-
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
- int *psize, int *ssize, unsigned long *vpn)
+ int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
unsigned long avpn, pteg, vpi;
- unsigned long hpte_r = hpte->r;
unsigned long hpte_v = hpte->v;
unsigned long vsid, seg_off;
- int i, size, shift, penc;
+ int size, a_size, shift;
+ /* Look at the 8 bit LP value */
+ unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
- if (!(hpte_v & HPTE_V_LARGE))
- size = MMU_PAGE_4K;
- else {
- for (i = 0; i < LP_BITS; i++) {
- if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
- break;
- }
- penc = LP_MASK(i+1) >> LP_SHIFT;
+ if (!(hpte_v & HPTE_V_LARGE)) {
+ size = MMU_PAGE_4K;
+ a_size = MMU_PAGE_4K;
+ } else {
for (size = 0; size < MMU_PAGE_COUNT; size++) {
- /* 4K pages are not represented by LP */
- if (size == MMU_PAGE_4K)
- continue;
-
/* valid entries have a shift value */
if (!mmu_psize_defs[size].shift)
continue;
- if (penc == mmu_psize_defs[size].penc)
+ a_size = __hpte_actual_psize(lp, size);
+ if (a_size != -1)
break;
}
}
-
/* This works for all page sizes, and for 256M and 1T segments */
*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
shift = mmu_psize_defs[size].shift;
@@ -433,7 +526,8 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
default:
*vpn = size = 0;
}
- *psize = size;
+ *psize = size;
+ *apsize = a_size;
}
/*
@@ -451,7 +545,7 @@ static void native_hpte_clear(void)
struct hash_pte *hptep = htab_address;
unsigned long hpte_v;
unsigned long pteg_count;
- int psize, ssize;
+ int psize, apsize, ssize;
pteg_count = htab_hash_mask + 1;
@@ -477,9 +571,9 @@ static void native_hpte_clear(void)
* already hold the native_tlbie_lock.
*/
if (hpte_v & HPTE_V_VALID) {
- hpte_decode(hptep, slot, &psize, &ssize, &vpn);
+ hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
hptep->v = 0;
- __tlbie(vpn, psize, ssize);
+ __tlbie(vpn, psize, apsize, ssize);
}
}
@@ -520,7 +614,7 @@ static void native_flush_hash_range(unsigned long number, int local)
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
hptep = htab_address + slot;
- want_v = hpte_encode_v(vpn, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = hptep->v;
if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -540,7 +634,7 @@ static void native_flush_hash_range(unsigned long number, int local)
pte_iterate_hashed_subpages(pte, psize,
vpn, index, shift) {
- __tlbiel(vpn, psize, ssize);
+ __tlbiel(vpn, psize, psize, ssize);
} pte_iterate_hashed_end();
}
asm volatile("ptesync":::"memory");
@@ -557,7 +651,7 @@ static void native_flush_hash_range(unsigned long number, int local)
pte_iterate_hashed_subpages(pte, psize,
vpn, index, shift) {
- __tlbie(vpn, psize, ssize);
+ __tlbie(vpn, psize, psize, ssize);
} pte_iterate_hashed_end();
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
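
The hash_native_64.c changes above recover the actual (possibly large) page size of an HPTE from its LP field: for each candidate size, only the low (shift - LP_SHIFT) bits of LP are compared against that size's penc encoding, since the remaining LP bits carry the offset within the large page. A rough standalone model of that matching with a made-up penc table (the real values live in mmu_psize_defs and are indexed per base page size; this sketch drops that dimension for brevity):

#include <stdio.h>

#define LP_SHIFT 12
#define LP_BITS  8

struct psize_def { int shift; int penc; };

/* Hypothetical table: index 1 stands for 64K pages, index 2 for 16M pages.
 * The penc values are made up for illustration only.
 */
static const struct psize_def defs[] = {
        { 12, -1 },     /* 4K: never matched via the LP field */
        { 16,  1 },     /* 64K */
        { 24,  0 },     /* 16M */
};

/* Mirrors the lookup idea in __hpte_actual_psize: compare only the
 * low (shift - LP_SHIFT) bits of LP against each candidate encoding.
 */
static int actual_psize_from_lp(unsigned int lp)
{
        int i;

        for (i = 1; i < 3; i++) {
                int shift = defs[i].shift - LP_SHIFT;
                unsigned int mask;

                if (defs[i].penc == -1)
                        continue;
                if (shift > LP_BITS)
                        shift = LP_BITS;
                mask = (1u << shift) - 1;
                if ((lp & mask) == (unsigned int)defs[i].penc)
                        return i;
        }
        return -1;
}

int main(void)
{
        printf("lp 0x01 -> index %d (64K in this table)\n", actual_psize_from_lp(0x01));
        printf("lp 0x00 -> index %d (16M in this table)\n", actual_psize_from_lp(0x00));
        return 0;
}

With this table, LP 0x01 matches the 64K entry (only its low four bits are compared) and LP 0x00 matches the 16M entry, which is the same style of decision the new hpte_actual_psize() makes before issuing tlbie with the right actual size.
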
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 3a292be..e303a6d 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -33,6 +33,7 @@
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>
+#include <linux/context_tracking.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
@@ -55,6 +56,7 @@
#include <asm/code-patching.h>
#include <asm/fadump.h>
#include <asm/firmware.h>
+#include <asm/tm.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -125,7 +127,7 @@ static struct mmu_psize_def mmu_psize_defaults_old[] = {
[MMU_PAGE_4K] = {
.shift = 12,
.sllp = 0,
- .penc = 0,
+ .penc = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
.avpnm = 0,
.tlbiel = 0,
},
@@ -139,14 +141,15 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
[MMU_PAGE_4K] = {
.shift = 12,
.sllp = 0,
- .penc = 0,
+ .penc = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
.avpnm = 0,
.tlbiel = 1,
},
[MMU_PAGE_16M] = {
.shift = 24,
.sllp = SLB_VSID_L,
- .penc = 0,
+ .penc = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
+ [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
.avpnm = 0x1UL,
.tlbiel = 0,
},
@@ -194,6 +197,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
unsigned long tprot = prot;
+ /*
+	 * If we hit a bad address, return an error.
+ */
+ if (!vsid)
+ return -1;
/* Make kernel text executable */
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
@@ -203,7 +211,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
BUG_ON(!ppc_md.hpte_insert);
ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
- HPTE_V_BOLTED, psize, ssize);
+ HPTE_V_BOLTED, psize, psize, ssize);
if (ret < 0)
break;
@@ -270,6 +278,30 @@ static void __init htab_init_seg_sizes(void)
of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}
+static int __init get_idx_from_shift(unsigned int shift)
+{
+ int idx = -1;
+
+ switch (shift) {
+ case 0xc:
+ idx = MMU_PAGE_4K;
+ break;
+ case 0x10:
+ idx = MMU_PAGE_64K;
+ break;
+ case 0x14:
+ idx = MMU_PAGE_1M;
+ break;
+ case 0x18:
+ idx = MMU_PAGE_16M;
+ break;
+ case 0x22:
+ idx = MMU_PAGE_16G;
+ break;
+ }
+ return idx;
+}
+
static int __init htab_dt_scan_page_sizes(unsigned long node,
const char *uname, int depth,
void *data)
@@ -285,64 +317,65 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
prop = (u32 *)of_get_flat_dt_prop(node,
"ibm,segment-page-sizes", &size);
if (prop != NULL) {
- DBG("Page sizes from device-tree:\n");
+ pr_info("Page sizes from device-tree:\n");
size /= 4;
cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
while(size > 0) {
- unsigned int shift = prop[0];
+ unsigned int base_shift = prop[0];
unsigned int slbenc = prop[1];
unsigned int lpnum = prop[2];
- unsigned int lpenc = 0;
struct mmu_psize_def *def;
- int idx = -1;
+ int idx, base_idx;
size -= 3; prop += 3;
- while(size > 0 && lpnum) {
- if (prop[0] == shift)
- lpenc = prop[1];
- prop += 2; size -= 2;
- lpnum--;
+ base_idx = get_idx_from_shift(base_shift);
+ if (base_idx < 0) {
+ /*
+ * skip the pte encoding also
+ */
+ prop += lpnum * 2; size -= lpnum * 2;
+ continue;
}
- switch(shift) {
- case 0xc:
- idx = MMU_PAGE_4K;
- break;
- case 0x10:
- idx = MMU_PAGE_64K;
- break;
- case 0x14:
- idx = MMU_PAGE_1M;
- break;
- case 0x18:
- idx = MMU_PAGE_16M;
+ def = &mmu_psize_defs[base_idx];
+ if (base_idx == MMU_PAGE_16M)
cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;
- break;
- case 0x22:
- idx = MMU_PAGE_16G;
- break;
- }
- if (idx < 0)
- continue;
- def = &mmu_psize_defs[idx];
- def->shift = shift;
- if (shift <= 23)
+
+ def->shift = base_shift;
+ if (base_shift <= 23)
def->avpnm = 0;
else
- def->avpnm = (1 << (shift - 23)) - 1;
+ def->avpnm = (1 << (base_shift - 23)) - 1;
def->sllp = slbenc;
- def->penc = lpenc;
- /* We don't know for sure what's up with tlbiel, so
+ /*
+ * We don't know for sure what's up with tlbiel, so
* for now we only set it for 4K and 64K pages
*/
- if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
+ if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
def->tlbiel = 1;
else
def->tlbiel = 0;
- DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, "
- "tlbiel=%d, penc=%d\n",
- idx, shift, def->sllp, def->avpnm, def->tlbiel,
- def->penc);
+ while (size > 0 && lpnum) {
+ unsigned int shift = prop[0];
+ int penc = prop[1];
+
+ prop += 2; size -= 2;
+ lpnum--;
+
+ idx = get_idx_from_shift(shift);
+ if (idx < 0)
+ continue;
+
+ if (penc == -1)
+ pr_err("Invalid penc for base_shift=%d "
+ "shift=%d\n", base_shift, shift);
+
+ def->penc[idx] = penc;
+ pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
+ " avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
+ base_shift, shift, def->sllp,
+ def->avpnm, def->tlbiel, def->penc[idx]);
+ }
}
return 1;
}
@@ -391,10 +424,21 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
}
#endif /* CONFIG_HUGETLB_PAGE */
+static void mmu_psize_set_default_penc(void)
+{
+ int bpsize, apsize;
+ for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
+ for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
+ mmu_psize_defs[bpsize].penc[apsize] = -1;
+}
+
static void __init htab_init_page_sizes(void)
{
int rc;
+	/* set the invalid penc to -1 */
+ mmu_psize_set_default_penc();
+
/* Default to 4K pages only */
memcpy(mmu_psize_defs, mmu_psize_defaults_old,
sizeof(mmu_psize_defaults_old));
@@ -758,6 +802,8 @@ void __init early_init_mmu(void)
/* Initialize stab / SLB management */
if (mmu_has_feature(MMU_FTR_SLB))
slb_initialize();
+ else
+ stab_initialize(get_paca()->stab_real);
}
#ifdef CONFIG_SMP
@@ -891,14 +937,14 @@ static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
void hash_failure_debug(unsigned long ea, unsigned long access,
unsigned long vsid, unsigned long trap,
- int ssize, int psize, unsigned long pte)
+ int ssize, int psize, int lpsize, unsigned long pte)
{
if (!printk_ratelimit())
return;
pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
ea, access, current->comm);
- pr_info(" trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n",
- trap, vsid, ssize, psize, pte);
+ pr_info(" trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n",
+ trap, vsid, ssize, psize, lpsize, pte);
}
/* Result code is:
@@ -909,6 +955,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
*/
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
+ enum ctx_state prev_state = exception_enter();
pgd_t *pgdir;
unsigned long vsid;
struct mm_struct *mm;
@@ -921,11 +968,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
ea, access, trap);
- if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
- DBG_LOW(" out of pgtable range !\n");
- return 1;
- }
-
/* Get region & vsid */
switch (REGION_ID(ea)) {
case USER_REGION_ID:
@@ -933,7 +975,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
mm = current->mm;
if (! mm) {
DBG_LOW(" user region with no mm !\n");
- return 1;
+ rc = 1;
+ goto bail;
}
psize = get_slice_psize(mm, ea);
ssize = user_segment_size(ea);
@@ -952,14 +995,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
/* Not a valid range
* Send the problem up to do_page_fault
*/
- return 1;
+ rc = 1;
+ goto bail;
}
DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
+ /* Bad address. */
+ if (!vsid) {
+ DBG_LOW("Bad address!\n");
+ rc = 1;
+ goto bail;
+ }
/* Get pgdir */
pgdir = mm->pgd;
- if (pgdir == NULL)
- return 1;
+ if (pgdir == NULL) {
+ rc = 1;
+ goto bail;
+ }
/* Check CPU locality */
tmp = cpumask_of(smp_processor_id());
@@ -982,7 +1034,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
if (ptep == NULL || !pte_present(*ptep)) {
DBG_LOW(" no PTE !\n");
- return 1;
+ rc = 1;
+ goto bail;
}
/* Add _PAGE_PRESENT to the required access perm */
@@ -993,13 +1046,16 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
*/
if (access & ~pte_val(*ptep)) {
DBG_LOW(" no access !\n");
- return 1;
+ rc = 1;
+ goto bail;
}
#ifdef CONFIG_HUGETLB_PAGE
- if (hugeshift)
- return __hash_page_huge(ea, access, vsid, ptep, trap, local,
+ if (hugeshift) {
+ rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
ssize, hugeshift, psize);
+ goto bail;
+ }
#endif /* CONFIG_HUGETLB_PAGE */
#ifndef CONFIG_PPC_64K_PAGES
@@ -1071,7 +1127,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
*/
if (rc == -1)
hash_failure_debug(ea, access, vsid, trap, ssize, psize,
- pte_val(*ptep));
+ psize, pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES
DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
@@ -1079,6 +1135,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
pte_val(*(ptep + PTRS_PER_PTE)));
#endif
DBG_LOW(" -> rc=%d\n", rc);
+
+bail:
+ exception_exit(prev_state);
return rc;
}
EXPORT_SYMBOL_GPL(hash_page);
@@ -1125,6 +1184,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* Get VSID */
ssize = user_segment_size(ea);
vsid = get_vsid(mm->context.id, ea, ssize);
+ if (!vsid)
+ return;
/* Hash doesn't like irqs */
local_irq_save(flags);
@@ -1147,7 +1208,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
*/
if (rc == -1)
hash_failure_debug(ea, access, vsid, trap, ssize,
- mm->context.user_psize, pte_val(*ptep));
+ mm->context.user_psize,
+ mm->context.user_psize,
+ pte_val(*ptep));
local_irq_restore(flags);
}
@@ -1171,6 +1234,22 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
} pte_iterate_hashed_end();
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* Transactions are not aborted by tlbiel, only tlbie.
+	 * Without an abort, syncing a page back to a block device with PIO could
+	 * pick up transactional data (bad!), so we force an abort here. Before the
+	 * sync the page will be made read-only, which will call flush_hash_page.
+ * BIG ISSUE here: if the kernel uses a page from userspace without
+ * unmapping it first, it may see the speculated version.
+ */
+ if (local && cpu_has_feature(CPU_FTR_TM) &&
+ current->thread.regs &&
+ MSR_TM_ACTIVE(current->thread.regs->msr)) {
+ tm_enable();
+ tm_abort(TM_CAUSE_TLBI);
+ }
+#endif
}
void flush_hash_range(unsigned long number, int local)
@@ -1194,6 +1273,8 @@ void flush_hash_range(unsigned long number, int local)
*/
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
+ enum ctx_state prev_state = exception_enter();
+
if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
if (rc == -2)
@@ -1203,23 +1284,64 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
_exception(SIGBUS, regs, BUS_ADRERR, address);
} else
bad_page_fault(regs, address, SIGBUS);
+
+ exception_exit(prev_state);
+}
+
+long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
+ unsigned long pa, unsigned long rflags,
+ unsigned long vflags, int psize, int ssize)
+{
+ unsigned long hpte_group;
+ long slot;
+
+repeat:
+ hpte_group = ((hash & htab_hash_mask) *
+ HPTES_PER_GROUP) & ~0x7UL;
+
+ /* Insert into the hash table, primary slot */
+ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
+ psize, psize, ssize);
+
+ /* Primary is full, try the secondary */
+ if (unlikely(slot == -1)) {
+ hpte_group = ((~hash & htab_hash_mask) *
+ HPTES_PER_GROUP) & ~0x7UL;
+ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
+ vflags | HPTE_V_SECONDARY,
+ psize, psize, ssize);
+ if (slot == -1) {
+ if (mftb() & 0x1)
+ hpte_group = ((hash & htab_hash_mask) *
+ HPTES_PER_GROUP)&~0x7UL;
+
+ ppc_md.hpte_remove(hpte_group);
+ goto repeat;
+ }
+ }
+
+ return slot;
}
#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
- unsigned long hash, hpteg;
+ unsigned long hash;
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
- int ret;
+ long ret;
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
- hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
- ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
- mode, HPTE_V_BOLTED,
- mmu_linear_psize, mmu_kernel_ssize);
+ /* Don't create HPTE entries for bad address */
+ if (!vsid)
+ return;
+
+ ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
+ HPTE_V_BOLTED,
+ mmu_linear_psize, mmu_kernel_ssize);
+
BUG_ON (ret < 0);
spin_lock(&linear_map_hash_lock);
BUG_ON(linear_map_hash_slots[lmi] & 0x80);
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index cecad34..0f1d94a 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -14,6 +14,10 @@
#include <asm/cacheflush.h>
#include <asm/machdep.h>
+extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
+				  unsigned long pa, unsigned long rflags,
+ unsigned long vflags, int psize, int ssize);
+
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, int local, int ssize,
unsigned int shift, unsigned int mmu_psize)
@@ -83,14 +87,9 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
if (likely(!(old_pte & _PAGE_HASHPTE))) {
unsigned long hash = hpt_hash(vpn, shift, ssize);
- unsigned long hpte_group;
pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
-repeat:
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
-
/* clear HPTE slot informations in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
@@ -101,26 +100,8 @@ repeat:
rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
_PAGE_COHERENT | _PAGE_GUARDED));
- /* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
- mmu_psize, ssize);
-
- /* Primary is full, try the secondary */
- if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
- HPTE_V_SECONDARY,
- mmu_psize, ssize);
- if (slot == -1) {
- if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP)&~0x7UL;
-
- ppc_md.hpte_remove(hpte_group);
- goto repeat;
- }
- }
+ slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
+ mmu_psize, ssize);
/*
* Hypervisor failure. Restore old pte and return -1
@@ -129,7 +110,7 @@ repeat:
if (unlikely(slot == -2)) {
*ptep = __pte(old_pte);
hash_failure_debug(ea, access, vsid, trap, ssize,
- mmu_psize, old_pte);
+ mmu_psize, mmu_psize, old_pte);
return -1;
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 1a6de0a..77fdd2c 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -48,30 +48,71 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif
-static inline int shift_to_mmu_psize(unsigned int shift)
+#define hugepd_none(hpd) ((hpd).pd == 0)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * At this point we do the placement change only for BOOK3S 64. This would
+ * possibly work on other subarchs.
+ */
+
+/*
+ * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
+ * a 16GB hugepage pte in the PGD and a 16MB hugepage pte at the PMD.
+ */
+int pmd_huge(pmd_t pmd)
{
- int psize;
+ /*
+ * leaf pte for huge page, bottom two bits != 00
+ */
+ return ((pmd_val(pmd) & 0x3) != 0x0);
+}
- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
- if (mmu_psize_defs[psize].shift == shift)
- return psize;
- return -1;
+int pud_huge(pud_t pud)
+{
+ /*
+ * leaf pte for huge page, bottom two bits != 00
+ */
+ return ((pud_val(pud) & 0x3) != 0x0);
}
-static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+int pgd_huge(pgd_t pgd)
{
- if (mmu_psize_defs[mmu_psize].shift)
- return mmu_psize_defs[mmu_psize].shift;
- BUG();
+ /*
+ * leaf pte for huge page, bottom two bits != 00
+ */
+ return ((pgd_val(pgd) & 0x3) != 0x0);
+}
+#else
+int pmd_huge(pmd_t pmd)
+{
+ return 0;
}
-#define hugepd_none(hpd) ((hpd).pd == 0)
+int pud_huge(pud_t pud)
+{
+ return 0;
+}
+int pgd_huge(pgd_t pgd)
+{
+ return 0;
+}
+#endif
+
+/*
+ * We have 4 cases for pgds and pmds:
+ * (1) invalid (all zeroes)
+ * (2) pointer to next table, as normal; bottom 6 bits == 0
+ * (3) leaf pte for huge page, bottom two bits != 00
+ * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
+ */
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
pgd_t *pg;
pud_t *pu;
pmd_t *pm;
+ pte_t *ret_pte;
hugepd_t *hpdp = NULL;
unsigned pdshift = PGDIR_SHIFT;
@@ -79,30 +120,43 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
*shift = 0;
pg = pgdir + pgd_index(ea);
- if (is_hugepd(pg)) {
+
+ if (pgd_huge(*pg)) {
+ ret_pte = (pte_t *) pg;
+ goto out;
+ } else if (is_hugepd(pg))
hpdp = (hugepd_t *)pg;
- } else if (!pgd_none(*pg)) {
+ else if (!pgd_none(*pg)) {
pdshift = PUD_SHIFT;
pu = pud_offset(pg, ea);
- if (is_hugepd(pu))
+
+ if (pud_huge(*pu)) {
+ ret_pte = (pte_t *) pu;
+ goto out;
+ } else if (is_hugepd(pu))
hpdp = (hugepd_t *)pu;
else if (!pud_none(*pu)) {
pdshift = PMD_SHIFT;
pm = pmd_offset(pu, ea);
- if (is_hugepd(pm))
+
+ if (pmd_huge(*pm)) {
+ ret_pte = (pte_t *) pm;
+ goto out;
+ } else if (is_hugepd(pm))
hpdp = (hugepd_t *)pm;
- else if (!pmd_none(*pm)) {
+ else if (!pmd_none(*pm))
return pte_offset_kernel(pm, ea);
- }
}
}
-
if (!hpdp)
return NULL;
+ ret_pte = hugepte_offset(hpdp, ea, pdshift);
+ pdshift = hugepd_shift(*hpdp);
+out:
if (shift)
- *shift = hugepd_shift(*hpdp);
- return hugepte_offset(hpdp, ea, pdshift);
+ *shift = pdshift;
+ return ret_pte;
}
EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
@@ -145,6 +199,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (unlikely(!hugepd_none(*hpdp)))
break;
else
+ /* We use the old format for PPC_FSL_BOOK3E */
hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
}
/* If we bailed from the for loop early, an error occurred, clean up */
@@ -156,9 +211,15 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
#else
if (!hugepd_none(*hpdp))
kmem_cache_free(cachep, new);
- else
+ else {
+#ifdef CONFIG_PPC_BOOK3S_64
+ hpdp->pd = (unsigned long)new |
+ (shift_to_mmu_psize(pshift) << 2);
+#else
hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
+ }
+#endif
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -175,6 +236,61 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * At this point we do the placement change only for BOOK3S 64. This would
+ * possibly work on other subarchs.
+ */
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
+{
+ pgd_t *pg;
+ pud_t *pu;
+ pmd_t *pm;
+ hugepd_t *hpdp = NULL;
+ unsigned pshift = __ffs(sz);
+ unsigned pdshift = PGDIR_SHIFT;
+
+ addr &= ~(sz-1);
+ pg = pgd_offset(mm, addr);
+
+ if (pshift == PGDIR_SHIFT)
+ /* 16GB huge page */
+ return (pte_t *) pg;
+ else if (pshift > PUD_SHIFT)
+ /*
+ * We need to use hugepd table
+ */
+ hpdp = (hugepd_t *)pg;
+ else {
+ pdshift = PUD_SHIFT;
+ pu = pud_alloc(mm, pg, addr);
+ if (pshift == PUD_SHIFT)
+ return (pte_t *)pu;
+ else if (pshift > PMD_SHIFT)
+ hpdp = (hugepd_t *)pu;
+ else {
+ pdshift = PMD_SHIFT;
+ pm = pmd_alloc(mm, pu, addr);
+ if (pshift == PMD_SHIFT)
+ /* 16MB hugepage */
+ return (pte_t *)pm;
+ else
+ hpdp = (hugepd_t *)pm;
+ }
+ }
+ if (!hpdp)
+ return NULL;
+
+ BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
+
+ if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
+ return NULL;
+
+ return hugepte_offset(hpdp, addr, pdshift);
+}
+
+#else
+
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
pgd_t *pg;
@@ -212,6 +328,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
return hugepte_offset(hpdp, addr, pdshift);
}
+#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages. This function is used in early
@@ -475,8 +592,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
do {
pmd = pmd_offset(pud, addr);
next = pmd_addr_end(addr, end);
- if (pmd_none(*pmd))
+ if (!is_hugepd(pmd)) {
+ /*
+			 * if it is not a hugepd pointer, we should already
+			 * find it cleared.
+ */
+ WARN_ON(!pmd_none_or_clear_bad(pmd));
continue;
+ }
#ifdef CONFIG_PPC_FSL_BOOK3E
/*
* Increment next by the size of the huge mapping since
@@ -628,16 +751,6 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
return page;
}
-int pmd_huge(pmd_t pmd)
-{
- return 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return 0;
-}
-
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
@@ -646,8 +759,8 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
return NULL;
}
-static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
+int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
{
unsigned long mask;
unsigned long pte_end;
@@ -742,7 +855,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct hstate *hstate = hstate_file(file);
int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
- return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
+ return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
@@ -883,11 +996,16 @@ static int __init hugetlbpage_init(void)
pdshift = PUD_SHIFT;
else
pdshift = PGDIR_SHIFT;
-
- pgtable_cache_add(pdshift - shift, NULL);
- if (!PGT_CACHE(pdshift - shift))
- panic("hugetlbpage_init(): could not create "
- "pgtable cache for %d bit pagesize\n", shift);
+ /*
+	 * if pdshift and shift are the same, we don't use the
+	 * pgtable cache for hugepd.
+ */
+ if (pdshift != shift) {
+ pgtable_cache_add(pdshift - shift, NULL);
+ if (!PGT_CACHE(pdshift - shift))
+ panic("hugetlbpage_init(): could not create "
+ "pgtable cache for %d bit pagesize\n", shift);
+ }
}
/* Set default large page size. Currently, we pick 16M or 1M
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c
index 8cdbd86..915412e 100644
--- a/arch/powerpc/mm/icswx.c
+++ b/arch/powerpc/mm/icswx.c
@@ -67,7 +67,7 @@
void switch_cop(struct mm_struct *next)
{
-#ifdef CONFIG_ICSWX_PID
+#ifdef CONFIG_PPC_ICSWX_PID
mtspr(SPRN_PID, next->context.cop_pid);
#endif
mtspr(SPRN_ACOP, next->context.acop);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 95a4529..a90b9c4 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -129,8 +129,7 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
new = kmem_cache_create(name, table_size, align, 0, ctor);
- PGT_CACHE(shift) = new;
-
+ pgtable_cache[shift - 1] = new;
pr_debug("Allocated pgtable cache for order %d\n", shift);
}
@@ -216,7 +215,8 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
unsigned long phys)
{
int mapped = htab_bolt_mapping(start, start + page_size, phys,
- PAGE_KERNEL, mmu_vmemmap_psize,
+ pgprot_val(PAGE_KERNEL),
+ mmu_vmemmap_psize,
mmu_kernel_ssize);
BUG_ON(mapped < 0);
}
@@ -263,19 +263,14 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
vmemmap_list = vmem_back;
}
-int __meminit vmemmap_populate(struct page *start_page,
- unsigned long nr_pages, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
- unsigned long start = (unsigned long)start_page;
- unsigned long end = (unsigned long)(start_page + nr_pages);
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
/* Align to the page size of the linear mapping. */
start = _ALIGN_DOWN(start, page_size);
- pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
- start_page, nr_pages, node);
- pr_debug(" -> map %lx..%lx\n", start, end);
+ pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
for (; start < end; start += page_size) {
void *p;
@@ -297,5 +292,10 @@ int __meminit vmemmap_populate(struct page *start_page,
return 0;
}
+
+void vmemmap_free(unsigned long start, unsigned long end)
+{
+}
+
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1a8f694..af90b90 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -70,10 +70,9 @@ unsigned long long memory_limit;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
+EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
-
EXPORT_SYMBOL(kmap_prot);
-EXPORT_SYMBOL(kmap_pte);
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
@@ -137,6 +136,18 @@ int arch_add_memory(int nid, u64 start, u64 size)
return __add_pages(nid, zone, start_pfn, nr_pages);
}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int arch_remove_memory(u64 start, u64 size)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ struct zone *zone;
+
+ zone = page_zone(pfn_to_page(start_pfn));
+ return __remove_pages(zone, start_pfn, nr_pages);
+}
+#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
@@ -199,13 +210,10 @@ void __init do_init_bootmem(void)
min_low_pfn = MEMORY_START >> PAGE_SHIFT;
boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
- /* Add active regions with valid PFNs */
- for_each_memblock(memory, reg) {
- unsigned long start_pfn, end_pfn;
- start_pfn = memblock_region_memory_base_pfn(reg);
- end_pfn = memblock_region_memory_end_pfn(reg);
- memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
- }
+ /* Place all memblock_regions in the same node and merge contiguous
+ * memblock_regions
+ */
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
/* Add all physical memory to the bootmem map, mark each area
* present.
@@ -349,13 +357,9 @@ void __init mem_init(void)
struct page *page = pfn_to_page(pfn);
if (memblock_is_reserved(paddr))
continue;
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
+ free_highmem_page(page);
reservedpages--;
}
- totalram_pages += totalhigh_pages;
printk(KERN_DEBUG "High memory: %luk\n",
totalhigh_pages << (PAGE_SHIFT-10));
}
@@ -402,39 +406,14 @@ void __init mem_init(void)
void free_initmem(void)
{
- unsigned long addr;
-
ppc_md.progress = ppc_printk_progress;
-
- addr = (unsigned long)__init_begin;
- for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
- memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- pr_info("Freeing unused kernel memory: %luk freed\n",
- ((unsigned long)__init_end -
- (unsigned long)__init_begin) >> 10);
+ free_initmem_default(POISON_FREE_INITMEM);
}
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start >= end)
- return;
-
- start = _ALIGN_DOWN(start, PAGE_SIZE);
- end = _ALIGN_UP(end, PAGE_SIZE);
- pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40bc5b0..178876ae 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -23,21 +23,13 @@
#include <linux/slab.h>
#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
#include "icswx.h"
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
-/*
- * 256MB segment
- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
- * available for user mappings. Each segment contains 2^28 bytes. Each
- * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
- * (19 == 37 + 28 - 46).
- */
-#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
-
int __init_new_context(void)
{
int index;
@@ -56,7 +48,7 @@ again:
else if (err)
return err;
- if (index > MAX_CONTEXT) {
+ if (index > MAX_USER_CONTEXT) {
spin_lock(&mmu_context_lock);
ida_remove(&mmu_context_ida, index);
spin_unlock(&mmu_context_lock);
@@ -94,6 +86,9 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */
+#ifdef CONFIG_PPC_64K_PAGES
+ mm->context.pte_frag = NULL;
+#endif
return 0;
}
@@ -105,13 +100,46 @@ void __destroy_context(int context_id)
}
EXPORT_SYMBOL_GPL(__destroy_context);
+#ifdef CONFIG_PPC_64K_PAGES
+static void destroy_pagetable_page(struct mm_struct *mm)
+{
+ int count;
+ void *pte_frag;
+ struct page *page;
+
+ pte_frag = mm->context.pte_frag;
+ if (!pte_frag)
+ return;
+
+ page = virt_to_page(pte_frag);
+ /* drop all the pending references */
+ count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
+ /* We allow PTE_FRAG_NR fragments from a PTE page */
+ count = atomic_sub_return(PTE_FRAG_NR - count, &page->_count);
+ if (!count) {
+ pgtable_page_dtor(page);
+ free_hot_cold_page(page, 0);
+ }
+}
+
+#else
+static inline void destroy_pagetable_page(struct mm_struct *mm)
+{
+ return;
+}
+#endif
+
+
void destroy_context(struct mm_struct *mm)
{
+
#ifdef CONFIG_PPC_ICSWX
drop_cop(mm->context.acop, mm);
kfree(mm->context.cop_lockp);
mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */
+
+ destroy_pagetable_page(mm);
__destroy_context(mm->context.id);
subpage_prot_free(mm);
mm->context.id = MMU_NO_CONTEXT;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bba87ca..88c0425 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -22,6 +22,11 @@
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
+#include <linux/stop_machine.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
@@ -29,6 +34,7 @@
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
+#include <asm/vdso.h>
static int numa_enabled = 1;
@@ -62,14 +68,11 @@ static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
*/
static void __init setup_node_to_cpumask_map(void)
{
- unsigned int node, num = 0;
+ unsigned int node;
/* setup nr_node_ids if not done yet */
- if (nr_node_ids == MAX_NUMNODES) {
- for_each_node_mask(node, node_possible_map)
- num = node;
- nr_node_ids = num + 1;
- }
+ if (nr_node_ids == MAX_NUMNODES)
+ setup_nr_node_ids();
/* allocate the map */
for (node = 0; node < nr_node_ids; node++)
@@ -79,7 +82,7 @@ static void __init setup_node_to_cpumask_map(void)
dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
-static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
+static int __init fake_numa_create_new_node(unsigned long end_pfn,
unsigned int *nid)
{
unsigned long long mem;
@@ -201,7 +204,7 @@ int __node_distance(int a, int b)
int distance = LOCAL_DISTANCE;
if (!form1_affinity)
- return distance;
+ return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
for (i = 0; i < distance_ref_points_depth; i++) {
if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
@@ -291,9 +294,7 @@ EXPORT_SYMBOL_GPL(of_node_to_nid);
static int __init find_min_common_depth(void)
{
int depth;
- struct device_node *chosen;
struct device_node *root;
- const char *vec5;
if (firmware_has_feature(FW_FEATURE_OPAL))
root = of_find_node_by_path("/ibm,opal");
@@ -325,24 +326,10 @@ static int __init find_min_common_depth(void)
distance_ref_points_depth /= sizeof(int);
-#define VEC5_AFFINITY_BYTE 5
-#define VEC5_AFFINITY 0x80
-
- if (firmware_has_feature(FW_FEATURE_OPAL))
+ if (firmware_has_feature(FW_FEATURE_OPAL) ||
+ firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
+ dbg("Using form 1 affinity\n");
form1_affinity = 1;
- else {
- chosen = of_find_node_by_path("/chosen");
- if (chosen) {
- vec5 = of_get_property(chosen,
- "ibm,architecture-vec-5", NULL);
- if (vec5 && (vec5[VEC5_AFFINITY_BYTE] &
- VEC5_AFFINITY)) {
- dbg("Using form 1 affinity\n");
- form1_affinity = 1;
- }
-
- of_node_put(chosen);
- }
}
if (form1_affinity) {
@@ -1270,10 +1257,18 @@ u64 memory_hotplug_max(void)
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
+struct topology_update_data {
+ struct topology_update_data *next;
+ unsigned int cpu;
+ int old_nid;
+ int new_nid;
+};
+
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
-static void set_topology_timer(void);
+static int prrn_enabled;
+static void reset_topology_timer(void);
/*
* Store the current values of the associativity change counters in the
@@ -1309,11 +1304,9 @@ static void setup_cpu_associativity_change_counters(void)
*/
static int update_cpu_associativity_changes_mask(void)
{
- int cpu, nr_cpus = 0;
+ int cpu;
cpumask_t *changes = &cpu_associativity_changes_mask;
- cpumask_clear(changes);
-
for_each_possible_cpu(cpu) {
int i, changed = 0;
u8 *counts = vphn_cpu_change_counts[cpu];
@@ -1327,11 +1320,10 @@ static int update_cpu_associativity_changes_mask(void)
}
if (changed) {
cpumask_set_cpu(cpu, changes);
- nr_cpus++;
}
}
- return nr_cpus;
+ return cpumask_weight(changes);
}
/*
@@ -1423,40 +1415,84 @@ static long vphn_get_associativity(unsigned long cpu,
}
/*
+ * Update the CPU maps and sysfs entries for a single CPU when its NUMA
+ * characteristics change. This function doesn't perform any locking and is
+ * only safe to call from stop_machine().
+ */
+static int update_cpu_topology(void *data)
+{
+ struct topology_update_data *update;
+ unsigned long cpu;
+
+ if (!data)
+ return -EINVAL;
+
+ cpu = get_cpu();
+
+ for (update = data; update; update = update->next) {
+ if (cpu != update->cpu)
+ continue;
+
+ unregister_cpu_under_node(update->cpu, update->old_nid);
+ unmap_cpu_from_node(update->cpu);
+ map_cpu_to_node(update->cpu, update->new_nid);
+ vdso_getcpu_init();
+ register_cpu_under_node(update->cpu, update->new_nid);
+ }
+
+ return 0;
+}
+
+/*
* Update the node maps and sysfs entries for each cpu whose home node
* has changed. Returns 1 when the topology has changed, and 0 otherwise.
*/
int arch_update_cpu_topology(void)
{
- int cpu, nid, old_nid, changed = 0;
+ unsigned int cpu, changed = 0;
+ struct topology_update_data *updates, *ud;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
+ cpumask_t updated_cpus;
struct device *dev;
+ int weight, i = 0;
+
+ weight = cpumask_weight(&cpu_associativity_changes_mask);
+ if (!weight)
+ return 0;
+
+ updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
+ if (!updates)
+ return 0;
- for_each_cpu(cpu,&cpu_associativity_changes_mask) {
+ cpumask_clear(&updated_cpus);
+
+ for_each_cpu(cpu, &cpu_associativity_changes_mask) {
+ ud = &updates[i++];
+ ud->cpu = cpu;
vphn_get_associativity(cpu, associativity);
- nid = associativity_to_nid(associativity);
+ ud->new_nid = associativity_to_nid(associativity);
- if (nid < 0 || !node_online(nid))
- nid = first_online_node;
+ if (ud->new_nid < 0 || !node_online(ud->new_nid))
+ ud->new_nid = first_online_node;
- old_nid = numa_cpu_lookup_table[cpu];
+ ud->old_nid = numa_cpu_lookup_table[cpu];
+ cpumask_set_cpu(cpu, &updated_cpus);
- /* Disable hotplug while we update the cpu
- * masks and sysfs.
- */
- get_online_cpus();
- unregister_cpu_under_node(cpu, old_nid);
- unmap_cpu_from_node(cpu);
- map_cpu_to_node(cpu, nid);
- register_cpu_under_node(cpu, nid);
- put_online_cpus();
-
- dev = get_cpu_device(cpu);
+ if (i < weight)
+ ud->next = &updates[i];
+ }
+
+ stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+
+ for (ud = &updates[0]; ud; ud = ud->next) {
+ dev = get_cpu_device(ud->cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
changed = 1;
}
+ kfree(updates);
return changed;
}
@@ -1473,49 +1509,165 @@ void topology_schedule_update(void)
static void topology_timer_fn(unsigned long ignored)
{
- if (!vphn_enabled)
- return;
- if (update_cpu_associativity_changes_mask() > 0)
+ if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
topology_schedule_update();
- set_topology_timer();
+ else if (vphn_enabled) {
+ if (update_cpu_associativity_changes_mask() > 0)
+ topology_schedule_update();
+ reset_topology_timer();
+ }
}
static struct timer_list topology_timer =
TIMER_INITIALIZER(topology_timer_fn, 0, 0);
-static void set_topology_timer(void)
+static void reset_topology_timer(void)
{
topology_timer.data = 0;
topology_timer.expires = jiffies + 60 * HZ;
- add_timer(&topology_timer);
+ mod_timer(&topology_timer, topology_timer.expires);
+}
+
+#ifdef CONFIG_SMP
+
+static void stage_topology_update(int core_id)
+{
+ cpumask_or(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
+ reset_topology_timer();
}
+static int dt_update_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct of_prop_reconfig *update;
+ int rc = NOTIFY_DONE;
+
+ switch (action) {
+ case OF_RECONFIG_UPDATE_PROPERTY:
+ update = (struct of_prop_reconfig *)data;
+ if (!of_prop_cmp(update->dn->type, "cpu") &&
+ !of_prop_cmp(update->prop->name, "ibm,associativity")) {
+ u32 core_id;
+ of_property_read_u32(update->dn, "reg", &core_id);
+ stage_topology_update(core_id);
+ rc = NOTIFY_OK;
+ }
+ break;
+ }
+
+ return rc;
+}
+
+static struct notifier_block dt_update_nb = {
+ .notifier_call = dt_update_callback,
+};
+
+#endif
+
/*
- * Start polling for VPHN associativity changes.
+ * Start polling for associativity changes.
*/
int start_topology_update(void)
{
int rc = 0;
- /* Disabled until races with load balancing are fixed */
- if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
- get_lppaca()->shared_proc) {
- vphn_enabled = 1;
- setup_cpu_associativity_change_counters();
- init_timer_deferrable(&topology_timer);
- set_topology_timer();
- rc = 1;
+ if (firmware_has_feature(FW_FEATURE_PRRN)) {
+ if (!prrn_enabled) {
+ prrn_enabled = 1;
+ vphn_enabled = 0;
+#ifdef CONFIG_SMP
+ rc = of_reconfig_notifier_register(&dt_update_nb);
+#endif
+ }
+ } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
+ get_lppaca()->shared_proc) {
+ if (!vphn_enabled) {
+ prrn_enabled = 0;
+ vphn_enabled = 1;
+ setup_cpu_associativity_change_counters();
+ init_timer_deferrable(&topology_timer);
+ reset_topology_timer();
+ }
}
return rc;
}
-__initcall(start_topology_update);
/*
* Disable polling for VPHN associativity changes.
*/
int stop_topology_update(void)
{
- vphn_enabled = 0;
- return del_timer_sync(&topology_timer);
+ int rc = 0;
+
+ if (prrn_enabled) {
+ prrn_enabled = 0;
+#ifdef CONFIG_SMP
+ rc = of_reconfig_notifier_unregister(&dt_update_nb);
+#endif
+ } else if (vphn_enabled) {
+ vphn_enabled = 0;
+ rc = del_timer_sync(&topology_timer);
+ }
+
+ return rc;
+}
+
+int prrn_is_enabled(void)
+{
+ return prrn_enabled;
+}
+
+static int topology_read(struct seq_file *file, void *v)
+{
+ if (vphn_enabled || prrn_enabled)
+ seq_puts(file, "on\n");
+ else
+ seq_puts(file, "off\n");
+
+ return 0;
+}
+
+static int topology_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, topology_read, NULL);
+}
+
+static ssize_t topology_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ char kbuf[4]; /* "on" or "off" plus null. */
+ int read_len;
+
+ read_len = count < 3 ? count : 3;
+ if (copy_from_user(kbuf, buf, read_len))
+ return -EINVAL;
+
+ kbuf[read_len] = '\0';
+
+ if (!strncmp(kbuf, "on", 2))
+ start_topology_update();
+ else if (!strncmp(kbuf, "off", 3))
+ stop_topology_update();
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static const struct file_operations topology_ops = {
+ .read = seq_read,
+ .write = topology_write,
+ .open = topology_open,
+ .release = single_release
+};
+
+static int topology_update_init(void)
+{
+ start_topology_update();
+ proc_create("powerpc/topology_updates", 644, NULL, &topology_ops);
+
+ return 0;
}
+device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */
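
The arch_update_cpu_topology() rework above batches every affected CPU into a chained topology_update_data list and hands it to stop_machine(), where each CPU walks the whole chain but applies only the entry naming itself. A minimal userspace sketch of that walk-and-filter pattern, with made-up CPU and node numbers (not kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	struct update {
		struct update *next;
		int cpu, old_nid, new_nid;
	};

	/* Stand-in for update_cpu_topology(): apply only our own entry */
	static void run_on_cpu(int cpu, struct update *head)
	{
		struct update *u;

		for (u = head; u; u = u->next)
			if (u->cpu == cpu)
				printf("cpu %d: node %d -> %d\n",
				       cpu, u->old_nid, u->new_nid);
	}

	int main(void)
	{
		int cpus[] = { 1, 3 }, new_nids[] = { 2, 0 };
		int i, cpu, n = 2;
		struct update *upd = calloc(n, sizeof(*upd));

		/* Build the chained update list, one entry per changed CPU */
		for (i = 0; i < n; i++) {
			upd[i].cpu = cpus[i];
			upd[i].old_nid = 0;
			upd[i].new_nid = new_nids[i];
			if (i < n - 1)
				upd[i].next = &upd[i + 1];
		}
		/* stop_machine() would run the walk on every online CPU */
		for (cpu = 0; cpu < 4; cpu++)
			run_on_cpu(cpu, upd);
		free(upd);
		return 0;
	}
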
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e212a27..a854096 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -61,7 +61,7 @@
#endif
#ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif
@@ -337,3 +337,121 @@ EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);
+
+#ifdef CONFIG_PPC_64K_PAGES
+static pte_t *get_from_cache(struct mm_struct *mm)
+{
+ void *pte_frag, *ret;
+
+ spin_lock(&mm->page_table_lock);
+ ret = mm->context.pte_frag;
+ if (ret) {
+ pte_frag = ret + PTE_FRAG_SIZE;
+ /*
+ * If we have used up all the fragments, mark the PTE page NULL
+ */
+ if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
+ pte_frag = NULL;
+ mm->context.pte_frag = pte_frag;
+ }
+ spin_unlock(&mm->page_table_lock);
+ return (pte_t *)ret;
+}
+
+static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
+{
+ void *ret = NULL;
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
+ __GFP_REPEAT | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ ret = page_address(page);
+ spin_lock(&mm->page_table_lock);
+ /*
+ * If we find pte_frag already set, we return
+ * the allocated page with a single fragment
+ * count.
+ */
+ if (likely(!mm->context.pte_frag)) {
+ atomic_set(&page->_count, PTE_FRAG_NR);
+ mm->context.pte_frag = ret + PTE_FRAG_SIZE;
+ }
+ spin_unlock(&mm->page_table_lock);
+
+ if (!kernel)
+ pgtable_page_ctor(page);
+
+ return (pte_t *)ret;
+}
+
+pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
+{
+ pte_t *pte;
+
+ pte = get_from_cache(mm);
+ if (pte)
+ return pte;
+
+ return __alloc_for_cache(mm, kernel);
+}
+
+void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
+{
+ struct page *page = virt_to_page(table);
+ if (put_page_testzero(page)) {
+ if (!kernel)
+ pgtable_page_dtor(page);
+ free_hot_cold_page(page, 0);
+ }
+}
+
+#ifdef CONFIG_SMP
+static void page_table_free_rcu(void *table)
+{
+ struct page *page = virt_to_page(table);
+ if (put_page_testzero(page)) {
+ pgtable_page_dtor(page);
+ free_hot_cold_page(page, 0);
+ }
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ if (!shift)
+ /* PTE page needs special handling */
+ page_table_free_rcu(table);
+ else {
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(shift), table);
+ }
+}
+#else
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+ if (!shift) {
+ /* PTE page needs special handling */
+ struct page *page = virt_to_page(table);
+ if (put_page_testzero(page)) {
+ pgtable_page_dtor(page);
+ free_hot_cold_page(page, 0);
+ }
+ } else {
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(shift), table);
+ }
+}
+#endif
+#endif /* CONFIG_PPC_64K_PAGES */
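
The page_table_alloc()/get_from_cache() scheme above carves PTE_FRAG_NR sub-tables out of one 64K page and recovers how many fragments have already been handed out from the low bits of the cursor pointer. A minimal sketch of that cursor arithmetic, with hypothetical page and fragment sizes rather than the kernel's PTE_FRAG_SIZE/PTE_FRAG_NR values:

	#include <stdio.h>

	#define PAGE_SZ    0x10000UL                 /* 64K page */
	#define FRAG_SHIFT 14                        /* hypothetical: 16K fragments */
	#define FRAG_SZ    (1UL << FRAG_SHIFT)
	#define FRAGS_PER_PAGE (PAGE_SZ / FRAG_SZ)

	int main(void)
	{
		unsigned long page = 0x200000UL;     /* stand-in for page_address() */
		unsigned long next = page;           /* models mm->context.pte_frag */
		unsigned long i;

		for (i = 0; i < FRAGS_PER_PAGE; i++) {
			unsigned long frag = next;
			/* low bits of the fragment address encode how many
			 * fragments of this page were handed out before it */
			unsigned long count = (frag & (PAGE_SZ - 1)) >> FRAG_SHIFT;

			next += FRAG_SZ;
			if ((next & (PAGE_SZ - 1)) == 0)
				next = 0;    /* page exhausted, as in get_from_cache() */
			printf("fragment %lu at %#lx, %lu handed out before it\n",
			       i, frag, count);
		}
		return 0;
	}
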
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1a16ca2..17aa6df 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -31,10 +31,15 @@
* No other registers are examined or changed.
*/
_GLOBAL(slb_allocate_realmode)
- /* r3 = faulting address */
+ /*
+ * check for bad kernel/user address
+ * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+ */
+ rldicr. r9,r3,4,(63 - 46 - 4)
+ bne- 8f
srdi r9,r3,60 /* get region */
- srdi r10,r3,28 /* get esid */
+ srdi r10,r3,SID_SHIFT /* get esid */
cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */
/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
*/
_GLOBAL(slb_miss_kernel_load_linear)
li r11,0
- li r9,0x1
/*
- * for 1T we shift 12 bits more. slb_finish_load_1T will do
- * the necessary adjustment
+ * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * r9 = region id.
*/
- rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+ addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+ addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
+
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
_GLOBAL(slb_miss_kernel_load_io)
li r11,0
6:
- li r9,0x1
/*
- * for 1T we shift 12 bits more. slb_finish_load_1T will do
- * the necessary adjustment
+ * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * r9 = region id.
*/
- rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+ addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+ addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T
-0: /* user address: proto-VSID = context << 15 | ESID. First check
- * if the address is within the boundaries of the user region
- */
- srdi. r9,r10,USER_ESID_BITS
- bne- 8f /* invalid ea bits set */
-
-
+0:
/* when using slices, we extract the psize off the slice bitmaps
* and then we need to get the sllp encoding off the mmu_psize_defs
* array.
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
ld r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
cmpldi r10,0x1000
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
- rldimi r10,r9,USER_ESID_BITS,0
-BEGIN_FTR_SECTION
bge slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
b slb_finish_load
8: /* invalid EA */
li r10,0 /* BAD_VSID */
+ li r9,0 /* BAD_VSID */
li r11,SLB_VSID_USER /* flags don't much matter */
b slb_finish_load
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
/* get context to calculate proto-VSID */
ld r9,PACACONTEXTID(r13)
- rldimi r10,r9,USER_ESID_BITS,0
-
/* fall through slb_finish_load */
#endif /* __DISABLED__ */
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
/*
* Finish loading of an SLB entry and return
*
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+ * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
*/
slb_finish_load:
+ rldimi r10,r9,ESID_BITS,0
ASM_VSID_SCRAMBLE(r10,r9,256M)
/*
* bits above VSID_BITS_256M need to be ignored from r10
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
/*
* Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
*
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
*/
slb_finish_load_1T:
- srdi r10,r10,40-28 /* get 1T ESID */
+ srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */
+ rldimi r10,r9,ESID_BITS_1T,0
ASM_VSID_SCRAMBLE(r10,r9,1T)
/*
* bits above VSID_BITS_1T need to be ignored from r10
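
The slb_low.S changes above switch the kernel regions to the context numbering context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1 before the proto-VSID is formed with rldimi and scrambled. A small C sketch of that formula, using a hypothetical MAX_USER_CONTEXT value (not the real constant):

	#include <stdio.h>

	#define MAX_USER_CONTEXT 0x7ffffUL   /* hypothetical, for illustration only */

	int main(void)
	{
		unsigned long eas[] = { 0xc000000000000000UL,   /* linear mapping  */
					0xd000000000000000UL,   /* vmalloc/ioremap */
					0xf000000000000000UL }; /* vmemmap         */
		int i;

		for (i = 0; i < 3; i++) {
			unsigned long region = eas[i] >> 60;
			unsigned long context = MAX_USER_CONTEXT + (region - 0xc) + 1;

			printf("region 0x%lx -> kernel context 0x%lx\n",
			       region, context);
		}
		return 0;
	}
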
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index cf9dada..3e99c14 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -237,134 +237,112 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
#endif
}
+/*
+ * Compute which slice addr is part of;
+ * set *boundary_addr to the start or end boundary of that slice
+ * (depending on 'end' parameter);
+ * return boolean indicating if the slice is marked as available in the
+ * 'available' slice_mask.
+ */
+static bool slice_scan_available(unsigned long addr,
+ struct slice_mask available,
+ int end,
+ unsigned long *boundary_addr)
+{
+ unsigned long slice;
+ if (addr < SLICE_LOW_TOP) {
+ slice = GET_LOW_SLICE_INDEX(addr);
+ *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
+ return !!(available.low_slices & (1u << slice));
+ } else {
+ slice = GET_HIGH_SLICE_INDEX(addr);
+ *boundary_addr = (slice + end) ?
+ ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
+ return !!(available.high_slices & (1u << slice));
+ }
+}
+
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
unsigned long len,
struct slice_mask available,
- int psize, int use_cache)
+ int psize)
{
- struct vm_area_struct *vma;
- unsigned long start_addr, addr;
- struct slice_mask mask;
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
-
- if (use_cache) {
- if (len <= mm->cached_hole_size) {
- start_addr = addr = TASK_UNMAPPED_BASE;
- mm->cached_hole_size = 0;
- } else
- start_addr = addr = mm->free_area_cache;
- } else
- start_addr = addr = TASK_UNMAPPED_BASE;
-
-full_search:
- for (;;) {
- addr = _ALIGN_UP(addr, 1ul << pshift);
- if ((TASK_SIZE - len) < addr)
- break;
- vma = find_vma(mm, addr);
- BUG_ON(vma && (addr >= vma->vm_end));
-
- mask = slice_range_to_mask(addr, len);
- if (!slice_check_fit(mask, available)) {
- if (addr < SLICE_LOW_TOP)
- addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
- else
- addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ unsigned long addr, found, next_end;
+ struct vm_unmapped_area_info info;
+
+ info.flags = 0;
+ info.length = len;
+ info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
+ info.align_offset = 0;
+
+ addr = TASK_UNMAPPED_BASE;
+ while (addr < TASK_SIZE) {
+ info.low_limit = addr;
+ if (!slice_scan_available(addr, available, 1, &addr))
continue;
+
+ next_slice:
+ /*
+ * At this point [info.low_limit; addr) covers
+ * available slices only and ends at a slice boundary.
+ * Check if we need to reduce the range, or if we can
+ * extend it to cover the next available slice.
+ */
+ if (addr >= TASK_SIZE)
+ addr = TASK_SIZE;
+ else if (slice_scan_available(addr, available, 1, &next_end)) {
+ addr = next_end;
+ goto next_slice;
}
- if (!vma || addr + len <= vma->vm_start) {
- /*
- * Remember the place where we stopped the search:
- */
- if (use_cache)
- mm->free_area_cache = addr + len;
- return addr;
- }
- if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
- addr = vma->vm_end;
- }
+ info.high_limit = addr;
- /* Make sure we didn't miss any holes */
- if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
- start_addr = addr = TASK_UNMAPPED_BASE;
- mm->cached_hole_size = 0;
- goto full_search;
+ found = vm_unmapped_area(&info);
+ if (!(found & ~PAGE_MASK))
+ return found;
}
+
return -ENOMEM;
}
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
unsigned long len,
struct slice_mask available,
- int psize, int use_cache)
+ int psize)
{
- struct vm_area_struct *vma;
- unsigned long addr;
- struct slice_mask mask;
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+ unsigned long addr, found, prev;
+ struct vm_unmapped_area_info info;
- /* check if free_area_cache is useful for us */
- if (use_cache) {
- if (len <= mm->cached_hole_size) {
- mm->cached_hole_size = 0;
- mm->free_area_cache = mm->mmap_base;
- }
-
- /* either no address requested or can't fit in requested
- * address hole
- */
- addr = mm->free_area_cache;
-
- /* make sure it can fit in the remaining address space */
- if (addr > len) {
- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
- mask = slice_range_to_mask(addr, len);
- if (slice_check_fit(mask, available) &&
- slice_area_is_free(mm, addr, len))
- /* remember the address as a hint for
- * next time
- */
- return (mm->free_area_cache = addr);
- }
- }
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
+ info.align_offset = 0;
addr = mm->mmap_base;
- while (addr > len) {
- /* Go down by chunk size */
- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
-
- /* Check for hit with different page size */
- mask = slice_range_to_mask(addr, len);
- if (!slice_check_fit(mask, available)) {
- if (addr < SLICE_LOW_TOP)
- addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
- else if (addr < (1ul << SLICE_HIGH_SHIFT))
- addr = SLICE_LOW_TOP;
- else
- addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
+ while (addr > PAGE_SIZE) {
+ info.high_limit = addr;
+ if (!slice_scan_available(addr - 1, available, 0, &addr))
continue;
- }
+ prev_slice:
/*
- * Lookup failure means no vma is above this address,
- * else if new region fits below vma->vm_start,
- * return with success:
+ * At this point [addr; info.high_limit) covers
+ * available slices only and starts at a slice boundary.
+ * Check if we need to reduce the range, or if we can
+ * extend it to cover the previous available slice.
*/
- vma = find_vma(mm, addr);
- if (!vma || (addr + len) <= vma->vm_start) {
- /* remember the address as a hint for next time */
- if (use_cache)
- mm->free_area_cache = addr;
- return addr;
+ if (addr < PAGE_SIZE)
+ addr = PAGE_SIZE;
+ else if (slice_scan_available(addr - 1, available, 0, &prev)) {
+ addr = prev;
+ goto prev_slice;
}
+ info.low_limit = addr;
- /* remember the largest hole we saw so far */
- if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
- addr = vma->vm_start;
+ found = vm_unmapped_area(&info);
+ if (!(found & ~PAGE_MASK))
+ return found;
}
/*
@@ -373,28 +351,18 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* can happen with large stack limits and large mmap()
* allocations.
*/
- addr = slice_find_area_bottomup(mm, len, available, psize, 0);
-
- /*
- * Restore the topdown base:
- */
- if (use_cache) {
- mm->free_area_cache = mm->mmap_base;
- mm->cached_hole_size = ~0UL;
- }
-
- return addr;
+ return slice_find_area_bottomup(mm, len, available, psize);
}
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
struct slice_mask mask, int psize,
- int topdown, int use_cache)
+ int topdown)
{
if (topdown)
- return slice_find_area_topdown(mm, len, mask, psize, use_cache);
+ return slice_find_area_topdown(mm, len, mask, psize);
else
- return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
+ return slice_find_area_bottomup(mm, len, mask, psize);
}
#define or_mask(dst, src) do { \
@@ -415,7 +383,7 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
unsigned long flags, unsigned int psize,
- int topdown, int use_cache)
+ int topdown)
{
struct slice_mask mask = {0, 0};
struct slice_mask good_mask;
@@ -430,8 +398,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
BUG_ON(mm->task_size == 0);
slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
- slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
- addr, len, flags, topdown, use_cache);
+ slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
+ addr, len, flags, topdown);
if (len > mm->task_size)
return -ENOMEM;
@@ -503,8 +471,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* Now let's see if we can find something in the existing
* slices for that size
*/
- newaddr = slice_find_area(mm, len, good_mask, psize, topdown,
- use_cache);
+ newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
if (newaddr != -ENOMEM) {
/* Found within the good mask, we don't have to setup,
* we thus return directly
@@ -536,8 +503,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
* anywhere in the good area.
*/
if (addr) {
- addr = slice_find_area(mm, len, good_mask, psize, topdown,
- use_cache);
+ addr = slice_find_area(mm, len, good_mask, psize, topdown);
if (addr != -ENOMEM) {
slice_dbg(" found area at 0x%lx\n", addr);
return addr;
@@ -547,15 +513,14 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* Now let's see if we can find something in the existing slices
* for that size plus free slices
*/
- addr = slice_find_area(mm, len, potential_mask, psize, topdown,
- use_cache);
+ addr = slice_find_area(mm, len, potential_mask, psize, topdown);
#ifdef CONFIG_PPC_64K_PAGES
if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
/* retry the search with 4k-page slices included */
or_mask(potential_mask, compat_mask);
addr = slice_find_area(mm, len, potential_mask, psize,
- topdown, use_cache);
+ topdown);
}
#endif
@@ -586,8 +551,7 @@ unsigned long arch_get_unmapped_area(struct file *filp,
unsigned long flags)
{
return slice_get_unmapped_area(addr, len, flags,
- current->mm->context.user_psize,
- 0, 1);
+ current->mm->context.user_psize, 0);
}
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
@@ -597,8 +561,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
const unsigned long flags)
{
return slice_get_unmapped_area(addr0, len, flags,
- current->mm->context.user_psize,
- 1, 1);
+ current->mm->context.user_psize, 1);
}
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
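
The rewritten slice allocators above no longer walk VMAs by hand; they use slice_scan_available() to merge runs of available slices into one [low, high) window and then let vm_unmapped_area() search inside it. A simplified model of that merging loop, with a hypothetical 16-slice bitmap and a toy slice size:

	#include <stdio.h>

	#define SLICE_SHIFT 20UL               /* pretend every slice is 1MB */
	#define NSLICES     16UL

	static int scan_available(unsigned long addr, unsigned int avail,
				  int end, unsigned long *boundary)
	{
		unsigned long slice = addr >> SLICE_SHIFT;

		*boundary = (slice + end) << SLICE_SHIFT;
		return (avail >> slice) & 1;
	}

	int main(void)
	{
		unsigned int avail = 0x6D36;   /* hypothetical availability bitmap */
		unsigned long addr = 0, top = NSLICES << SLICE_SHIFT;

		while (addr < top) {
			unsigned long low = addr, next;

			if (!scan_available(addr, avail, 1, &addr))
				continue;      /* slice not available, skip it */
			while (addr < top && scan_available(addr, avail, 1, &next))
				addr = next;   /* extend over the available run */
			printf("search window [%#lx, %#lx)\n", low, addr);
		}
		return 0;
	}
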
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 0d82ef5..023ec8a 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
if (!is_kernel_addr(addr)) {
ssize = user_segment_size(addr);
vsid = get_vsid(mm->context.id, addr, ssize);
- WARN_ON(vsid == 0);
} else {
vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize;
}
+ WARN_ON(vsid == 0);
vpn = hpt_vpn(addr, vsid, ssize);
rpte = __real_pte(__pte(pte), ptep);
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 44097cc..033a980 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -427,8 +427,8 @@ static void setup_page_sizes(void)
int i, psize;
#ifdef CONFIG_PPC_FSL_BOOK3E
- int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
unsigned int mmucfg = mfspr(SPRN_MMUCFG);
+ int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index e834f1e..c427ae3 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -671,16 +671,12 @@ void bpf_jit_compile(struct sk_filter *fp)
}
if (bpf_jit_enable > 1)
- pr_info("flen=%d proglen=%u pass=%d image=%p\n",
- flen, proglen, pass, image);
+ /* Note that we output the base address of the code_base
+ * rather than image, since opcodes are in code_base.
+ */
+ bpf_jit_dump(flen, proglen, pass, code_base);
if (image) {
- if (bpf_jit_enable > 1)
- print_hex_dump(KERN_ERR, "JIT code: ",
- DUMP_PREFIX_ADDRESS,
- 16, 1, code_base,
- proglen, false);
-
bpf_flush_icache(code_base, code_base + (proglen/4));
/* Function descriptor nastiness: Address + TOC */
((u64 *)image)[0] = (u64)code_base;
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index 06dd8d5..60d71ee 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -2,9 +2,10 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
obj-$(CONFIG_PERF_EVENTS) += callchain.o
-obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o
+obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
- power5+-pmu.o power6-pmu.o power7-pmu.o
+ power5+-pmu.o power6-pmu.o power7-pmu.o \
+ power8-pmu.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o
diff --git a/arch/powerpc/perf/bhrb.S b/arch/powerpc/perf/bhrb.S
new file mode 100644
index 0000000..d85f9a5
--- /dev/null
+++ b/arch/powerpc/perf/bhrb.S
@@ -0,0 +1,44 @@
+/*
+ * Basic assembly code to read BHRB entries
+ *
+ * Copyright 2013 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/ppc_asm.h>
+#include <asm/ppc-opcode.h>
+
+ .text
+
+.balign 8
+
+/* r3 = n (where n = [0-31])
+ * The maximum number of BHRB entries supported with the PPC_MFBHRBE instruction
+ * is 1024. We have a limited number of table entries here because POWER8
+ * implements only 32 BHRB entries.
+ */
+
+/* .global read_bhrb */
+_GLOBAL(read_bhrb)
+ cmpldi r3,31
+ bgt 1f
+ ld r4,bhrb_table@got(r2)
+ sldi r3,r3,3
+ add r3,r4,r3
+ mtctr r3
+ bctr
+1: li r3,0
+ blr
+
+#define MFBHRB_TABLE1(n) PPC_MFBHRBE(R3,n); blr
+#define MFBHRB_TABLE2(n) MFBHRB_TABLE1(n); MFBHRB_TABLE1(n+1)
+#define MFBHRB_TABLE4(n) MFBHRB_TABLE2(n); MFBHRB_TABLE2(n+2)
+#define MFBHRB_TABLE8(n) MFBHRB_TABLE4(n); MFBHRB_TABLE4(n+4)
+#define MFBHRB_TABLE16(n) MFBHRB_TABLE8(n); MFBHRB_TABLE8(n+8)
+#define MFBHRB_TABLE32(n) MFBHRB_TABLE16(n); MFBHRB_TABLE16(n+16)
+
+bhrb_table:
+ MFBHRB_TABLE32(0)
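
read_bhrb() above jumps into a table of 32 'mfbhrbe rX,n; blr' pairs built by the doubling MFBHRB_TABLE macros, because the BHRB entry number is an immediate operand and cannot come from a register. The same doubling trick in plain C, sketched with a stand-in op() instead of the instruction:

	#include <stdio.h>

	static void op(int n) { printf("table entry %d\n", n); }

	/* Doubling macros, mirroring MFBHRB_TABLE1..32 above */
	#define T1(n)  op(n);
	#define T2(n)  T1(n)  T1((n) + 1)
	#define T4(n)  T2(n)  T2((n) + 2)
	#define T8(n)  T4(n)  T4((n) + 4)
	#define T16(n) T8(n)  T8((n) + 8)
	#define T32(n) T16(n) T16((n) + 16)

	int main(void)
	{
		T32(0)          /* expands to op(0); op(1); ... op(31); */
		return 0;
	}
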
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index aa2465e..29c6482 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -13,11 +13,18 @@
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
+#include <linux/uaccess.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
+#include <asm/code-patching.h>
+
+#define BHRB_MAX_ENTRIES 32
+#define BHRB_TARGET 0x0000000000000002
+#define BHRB_PREDICTION 0x0000000000000001
+#define BHRB_EA 0xFFFFFFFFFFFFFFFC
struct cpu_hw_events {
int n_events;
@@ -38,7 +45,15 @@ struct cpu_hw_events {
unsigned int group_flag;
int n_txn_start;
+
+ /* BHRB bits */
+ u64 bhrb_filter; /* BHRB HW branch filter */
+ int bhrb_users;
+ void *bhrb_context;
+ struct perf_branch_stack bhrb_stack;
+ struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
};
+
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct power_pmu *ppmu;
@@ -87,8 +102,17 @@ static inline int siar_valid(struct pt_regs *regs)
return 1;
}
+static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
+static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
+void power_pmu_flush_branch_stack(void) {}
+static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
#endif /* CONFIG_PPC32 */
+static bool regs_use_siar(struct pt_regs *regs)
+{
+ return !!regs->result;
+}
+
/*
* Things that are specific to 64-bit implementations.
*/
@@ -98,11 +122,12 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
- if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+ if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
if (slot > 1)
return 4 * (slot - 1);
}
+
return 0;
}
@@ -111,43 +136,57 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
* If we're not doing instruction sampling, give them the SDAR
* (sampled data address). If we are doing instruction sampling, then
* only give them the SDAR if it corresponds to the instruction
- * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
- * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
+ * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
*/
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
unsigned long mmcra = regs->dsisr;
- unsigned long sdsync;
+ bool sdar_valid;
- if (ppmu->flags & PPMU_SIAR_VALID)
- sdsync = POWER7P_MMCRA_SDAR_VALID;
- else if (ppmu->flags & PPMU_ALT_SIPR)
- sdsync = POWER6_MMCRA_SDSYNC;
- else
- sdsync = MMCRA_SDSYNC;
+ if (ppmu->flags & PPMU_HAS_SIER)
+ sdar_valid = regs->dar & SIER_SDAR_VALID;
+ else {
+ unsigned long sdsync;
- if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+ if (ppmu->flags & PPMU_SIAR_VALID)
+ sdsync = POWER7P_MMCRA_SDAR_VALID;
+ else if (ppmu->flags & PPMU_ALT_SIPR)
+ sdsync = POWER6_MMCRA_SDSYNC;
+ else
+ sdsync = MMCRA_SDSYNC;
+
+ sdar_valid = mmcra & sdsync;
+ }
+
+ if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
*addrp = mfspr(SPRN_SDAR);
}
-static bool mmcra_sihv(unsigned long mmcra)
+static bool regs_sihv(struct pt_regs *regs)
{
unsigned long sihv = MMCRA_SIHV;
+ if (ppmu->flags & PPMU_HAS_SIER)
+ return !!(regs->dar & SIER_SIHV);
+
if (ppmu->flags & PPMU_ALT_SIPR)
sihv = POWER6_MMCRA_SIHV;
- return !!(mmcra & sihv);
+ return !!(regs->dsisr & sihv);
}
-static bool mmcra_sipr(unsigned long mmcra)
+static bool regs_sipr(struct pt_regs *regs)
{
unsigned long sipr = MMCRA_SIPR;
+ if (ppmu->flags & PPMU_HAS_SIER)
+ return !!(regs->dar & SIER_SIPR);
+
if (ppmu->flags & PPMU_ALT_SIPR)
sipr = POWER6_MMCRA_SIPR;
- return !!(mmcra & sipr);
+ return !!(regs->dsisr & sipr);
}
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
@@ -161,8 +200,7 @@ static inline u32 perf_flags_from_msr(struct pt_regs *regs)
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
- unsigned long mmcra = regs->dsisr;
- unsigned long use_siar = regs->result;
+ bool use_siar = regs_use_siar(regs);
if (!use_siar)
return perf_flags_from_msr(regs);
@@ -181,16 +219,19 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
}
/* PR has priority over HV, so order below is important */
- if (mmcra_sipr(mmcra))
+ if (regs_sipr(regs))
return PERF_RECORD_MISC_USER;
- if (mmcra_sihv(mmcra) && (freeze_events_kernel != MMCR0_FCHV))
+
+ if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
+
return PERF_RECORD_MISC_KERNEL;
}
/*
* Overload regs->dsisr to store MMCRA so we only need to read it once
* on each interrupt.
+ * Overload regs->dar to store SIER if we have it.
* Overload regs->result to specify whether we should use the MSR (result
* is zero) or the SIAR (result is non zero).
*/
@@ -200,6 +241,11 @@ static inline void perf_read_regs(struct pt_regs *regs)
int marked = mmcra & MMCRA_SAMPLE_ENABLE;
int use_siar;
+ regs->dsisr = mmcra;
+
+ if (ppmu->flags & PPMU_HAS_SIER)
+ regs->dar = mfspr(SPRN_SIER);
+
/*
* If this isn't a PMU exception (eg a software event) the SIAR is
* not valid. Use pt_regs.
@@ -223,12 +269,11 @@ static inline void perf_read_regs(struct pt_regs *regs)
use_siar = 1;
else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
use_siar = 0;
- else if (!(ppmu->flags & PPMU_NO_SIPR) && mmcra_sipr(mmcra))
+ else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
use_siar = 0;
else
use_siar = 1;
- regs->dsisr = mmcra;
regs->result = use_siar;
}
@@ -253,12 +298,170 @@ static inline int siar_valid(struct pt_regs *regs)
unsigned long mmcra = regs->dsisr;
int marked = mmcra & MMCRA_SAMPLE_ENABLE;
- if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
- return mmcra & POWER7P_MMCRA_SIAR_VALID;
+ if (marked) {
+ if (ppmu->flags & PPMU_HAS_SIER)
+ return regs->dar & SIER_SIAR_VALID;
+
+ if (ppmu->flags & PPMU_SIAR_VALID)
+ return mmcra & POWER7P_MMCRA_SIAR_VALID;
+ }
return 1;
}
+
+/* Reset all possible BHRB entries */
+static void power_pmu_bhrb_reset(void)
+{
+ asm volatile(PPC_CLRBHRB);
+}
+
+static void power_pmu_bhrb_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ if (!ppmu->bhrb_nr)
+ return;
+
+ /* Clear BHRB if we changed task context to avoid data leaks */
+ if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
+ power_pmu_bhrb_reset();
+ cpuhw->bhrb_context = event->ctx;
+ }
+ cpuhw->bhrb_users++;
+}
+
+static void power_pmu_bhrb_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ if (!ppmu->bhrb_nr)
+ return;
+
+ cpuhw->bhrb_users--;
+ WARN_ON_ONCE(cpuhw->bhrb_users < 0);
+
+ if (!cpuhw->disabled && !cpuhw->bhrb_users) {
+ /* BHRB cannot be turned off when other
+ * events are active on the PMU.
+ */
+
+ /* avoid stale pointer */
+ cpuhw->bhrb_context = NULL;
+ }
+}
+
+/* Called from ctxsw to prevent one process's branch entries from
+ * mingling with another process's entries during a context switch.
+ */
+void power_pmu_flush_branch_stack(void)
+{
+ if (ppmu->bhrb_nr)
+ power_pmu_bhrb_reset();
+}
+/* Calculate the to address for a branch */
+static __u64 power_pmu_bhrb_to(u64 addr)
+{
+ unsigned int instr;
+ int ret;
+ __u64 target;
+
+ if (is_kernel_addr(addr))
+ return branch_target((unsigned int *)addr);
+
+ /* Userspace: need to copy the instruction here, then translate it */
+ pagefault_disable();
+ ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
+ if (ret) {
+ pagefault_enable();
+ return 0;
+ }
+ pagefault_enable();
+
+ target = branch_target(&instr);
+ if ((!target) || (instr & BRANCH_ABSOLUTE))
+ return target;
+
+ /* Translate relative branch target from kernel to user address */
+ return target - (unsigned long)&instr + addr;
+}
+
+/* Processing BHRB entries */
+void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+{
+ u64 val;
+ u64 addr;
+ int r_index, u_index, pred;
+
+ r_index = 0;
+ u_index = 0;
+ while (r_index < ppmu->bhrb_nr) {
+ /* Assembly read function */
+ val = read_bhrb(r_index++);
+ if (!val)
+ /* Terminal marker: End of valid BHRB entries */
+ break;
+ else {
+ addr = val & BHRB_EA;
+ pred = val & BHRB_PREDICTION;
+
+ if (!addr)
+ /* invalid entry */
+ continue;
+
+ /* Branches are read most recent first (ie. mfbhrb 0 is
+ * the most recent branch).
+ * There are two types of valid entries:
+ * 1) a target entry which is the to address of a
+ * computed goto like blr, bctr or btar. The next
+ * entry read from the bhrb will be the branch
+ * corresponding to this target (ie. the actual
+ * blr/bctr/btar instruction).
+ * 2) a from address which is an actual branch. If a
+ * target entry precedes this, then this is the
+ * matching branch for that target. If this is not
+ * following a target entry, then this is a branch
+ * where the target is given as an immediate field
+ * in the instruction (ie. an i or b form branch).
+ * In this case we need to read the instruction from
+ * memory to determine the target/to address.
+ */
+
+ if (val & BHRB_TARGET) {
+ /* Target branches use two entries
+ * (ie. computed gotos/XL form)
+ */
+ cpuhw->bhrb_entries[u_index].to = addr;
+ cpuhw->bhrb_entries[u_index].mispred = pred;
+ cpuhw->bhrb_entries[u_index].predicted = ~pred;
+
+ /* Get from address in next entry */
+ val = read_bhrb(r_index++);
+ addr = val & BHRB_EA;
+ if (val & BHRB_TARGET) {
+ /* Shouldn't have two targets in a
+ * row. Reset the index and try again. */
+ r_index--;
+ addr = 0;
+ }
+ cpuhw->bhrb_entries[u_index].from = addr;
+ } else {
+ /* Branches to immediate field
+ (ie I or B form) */
+ cpuhw->bhrb_entries[u_index].from = addr;
+ cpuhw->bhrb_entries[u_index].to =
+ power_pmu_bhrb_to(addr);
+ cpuhw->bhrb_entries[u_index].mispred = pred;
+ cpuhw->bhrb_entries[u_index].predicted = ~pred;
+ }
+ u_index++;
+
+ }
+ }
+ cpuhw->bhrb_stack.nr = u_index;
+ return;
+}
+
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
@@ -822,6 +1025,9 @@ static void power_pmu_enable(struct pmu *pmu)
}
out:
+ if (cpuhw->bhrb_users)
+ ppmu->config_bhrb(cpuhw->bhrb_filter);
+
local_irq_restore(flags);
}
@@ -880,8 +1086,16 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
+ /*
+ * This event may have been disabled/stopped in record_and_restart()
+ * because we exceeded the ->event_limit. If re-starting the event,
+ * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
+ * notification is re-enabled.
+ */
if (!(ef_flags & PERF_EF_START))
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ else
+ event->hw.state = 0;
/*
* If group events scheduling transaction was started,
@@ -903,6 +1117,9 @@ nocheck:
ret = 0;
out:
+ if (has_branch_stack(event))
+ power_pmu_bhrb_enable(event);
+
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
@@ -955,6 +1172,9 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
+ if (has_branch_stack(event))
+ power_pmu_bhrb_disable(event);
+
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
@@ -1187,9 +1407,11 @@ static int power_pmu_event_init(struct perf_event *event)
if (!ppmu)
return -ENOENT;
- /* does not support taken branch sampling */
- if (has_branch_stack(event))
- return -EOPNOTSUPP;
+ if (has_branch_stack(event)) {
+ /* PMU has BHRB enabled */
+ if (!(ppmu->flags & PPMU_BHRB))
+ return -EOPNOTSUPP;
+ }
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
@@ -1270,6 +1492,15 @@ static int power_pmu_event_init(struct perf_event *event)
cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
+
+ if (has_branch_stack(event)) {
+ cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
+ event->attr.branch_sample_type);
+
+ if (cpuhw->bhrb_filter == -1)
+ return -EOPNOTSUPP;
+ }
+
put_cpu_var(cpu_hw_events);
if (err)
return -EINVAL;
@@ -1305,6 +1536,16 @@ static int power_pmu_event_idx(struct perf_event *event)
return event->hw.idx;
}
+ssize_t power_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
struct pmu power_pmu = {
.pmu_enable = power_pmu_enable,
.pmu_disable = power_pmu_disable,
@@ -1318,9 +1559,9 @@ struct pmu power_pmu = {
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
.event_idx = power_pmu_event_idx,
+ .flush_branch_stack = power_pmu_flush_branch_stack,
};
-
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -1349,6 +1590,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
+ if (delta == 0)
+ left++;
if (period) {
if (left <= 0) {
left += period;
@@ -1377,6 +1620,13 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
+ if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
+ struct cpu_hw_events *cpuhw;
+ cpuhw = &__get_cpu_var(cpu_hw_events);
+ power_pmu_bhrb_read(cpuhw);
+ data.br_stack = &cpuhw->bhrb_stack;
+ }
+
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
}
@@ -1402,7 +1652,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- unsigned long use_siar = regs->result;
+ bool use_siar = regs_use_siar(regs);
if (use_siar && siar_valid(regs))
return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
@@ -1412,11 +1662,8 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
return regs->nip;
}
-static bool pmc_overflow(unsigned long val)
+static bool pmc_overflow_power7(unsigned long val)
{
- if ((int)val < 0)
- return true;
-
/*
* Events on POWER7 can roll back if a speculative event doesn't
* eventually complete. Unfortunately in some rare cases they will
@@ -1428,7 +1675,15 @@ static bool pmc_overflow(unsigned long val)
* PMCs because a user might set a period of less than 256 and we
* don't want to mistakenly reset them.
*/
- if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
+ if ((0x80000000 - val) <= 256)
+ return true;
+
+ return false;
+}
+
+static bool pmc_overflow(unsigned long val)
+{
+ if ((int)val < 0)
return true;
return false;
@@ -1439,11 +1694,11 @@ static bool pmc_overflow(unsigned long val)
*/
static void perf_event_interrupt(struct pt_regs *regs)
{
- int i;
+ int i, j;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
- unsigned long val;
- int found = 0;
+ unsigned long val[8];
+ int found, active;
int nmi;
if (cpuhw->n_limited)
@@ -1458,33 +1713,53 @@ static void perf_event_interrupt(struct pt_regs *regs)
else
irq_enter();
- for (i = 0; i < cpuhw->n_events; ++i) {
- event = cpuhw->event[i];
- if (!event->hw.idx || is_limited_pmc(event->hw.idx))
+ /* Read all the PMCs since we'll need them a bunch of times */
+ for (i = 0; i < ppmu->n_counter; ++i)
+ val[i] = read_pmc(i + 1);
+
+ /* Try to find what caused the IRQ */
+ found = 0;
+ for (i = 0; i < ppmu->n_counter; ++i) {
+ if (!pmc_overflow(val[i]))
continue;
- val = read_pmc(event->hw.idx);
- if ((int)val < 0) {
- /* event has overflowed */
- found = 1;
- record_and_restart(event, val, regs);
+ if (is_limited_pmc(i + 1))
+ continue; /* these won't generate IRQs */
+ /*
+ * We've found one that's overflowed. For active
+ * counters we need to log this. For inactive
+ * counters, we need to reset it anyway
+ */
+ found = 1;
+ active = 0;
+ for (j = 0; j < cpuhw->n_events; ++j) {
+ event = cpuhw->event[j];
+ if (event->hw.idx == (i + 1)) {
+ active = 1;
+ record_and_restart(event, val[i], regs);
+ break;
+ }
}
+ if (!active)
+ /* reset non active counters that have overflowed */
+ write_pmc(i + 1, 0);
}
-
- /*
- * In case we didn't find and reset the event that caused
- * the interrupt, scan all events and reset any that are
- * negative, to avoid getting continual interrupts.
- * Any that we processed in the previous loop will not be negative.
- */
- if (!found) {
- for (i = 0; i < ppmu->n_counter; ++i) {
- if (is_limited_pmc(i + 1))
+ if (!found && pvr_version_is(PVR_POWER7)) {
+ /* check active counters for special buggy p7 overflow */
+ for (i = 0; i < cpuhw->n_events; ++i) {
+ event = cpuhw->event[i];
+ if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
- val = read_pmc(i + 1);
- if (pmc_overflow(val))
- write_pmc(i + 1, 0);
+ if (pmc_overflow_power7(val[event->hw.idx - 1])) {
+ /* event has overflowed in a buggy way */
+ found = 1;
+ record_and_restart(event,
+ val[event->hw.idx - 1],
+ regs);
+ }
}
}
+ if (!found && !nmi && printk_ratelimit())
+ printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
/*
* Reset MMCR0 to its normal value. This will set PMXE and
@@ -1537,6 +1812,8 @@ int __cpuinit register_power_pmu(struct power_pmu *pmu)
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
+ power_pmu.attr_groups = ppmu->attr_groups;
+
#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
diff --git a/arch/powerpc/perf/e500-pmu.c b/arch/powerpc/perf/e500-pmu.c
index cb2e294..fb66492 100644
--- a/arch/powerpc/perf/e500-pmu.c
+++ b/arch/powerpc/perf/e500-pmu.c
@@ -24,6 +24,8 @@ static int e500_generic_events[] = {
[PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
[PERF_COUNT_HW_BRANCH_MISSES] = 15,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 18,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 19,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index a8757ba..b03b6dc 100644
--- a/arch/powerpc/perf/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
@@ -671,7 +671,7 @@ static struct power_pmu power5p_pmu = {
.get_alternatives = power5p_get_alternatives,
.disable_pmc = power5p_disable_pmc,
.limited_pmc_event = power5p_limited_pmc_event,
- .flags = PPMU_LIMITED_PMC5_6,
+ .flags = PPMU_LIMITED_PMC5_6 | PPMU_HAS_SSLOT,
.n_generic = ARRAY_SIZE(power5p_generic_events),
.generic_events = power5p_generic_events,
.cache_events = &power5p_cache_events,
diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index e7f06eb..1e8ce42 100644
--- a/arch/powerpc/perf/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
@@ -615,6 +615,7 @@ static struct power_pmu power5_pmu = {
.n_generic = ARRAY_SIZE(power5_generic_events),
.generic_events = power5_generic_events,
.cache_events = &power5_cache_events,
+ .flags = PPMU_HAS_SSLOT,
};
static int __init init_power5_pmu(void)
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 2ee01e3..3c475d6 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -51,6 +51,18 @@
#define MMCR1_PMCSEL_MSK 0xff
/*
+ * Power7 event codes.
+ */
+#define PME_PM_CYC 0x1e
+#define PME_PM_GCT_NOSLOT_CYC 0x100f8
+#define PME_PM_CMPLU_STALL 0x4000a
+#define PME_PM_INST_CMPL 0x2
+#define PME_PM_LD_REF_L1 0xc880
+#define PME_PM_LD_MISS_L1 0x400f0
+#define PME_PM_BRU_FIN 0x10068
+#define PME_PM_BRU_MPRED 0x400f6
+
+/*
* Layout of constraint bits:
* 6666555555555544444444443333333333222222222211111111110000000000
* 3210987654321098765432109876543210987654321098765432109876543210
@@ -307,14 +319,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
}
static int power7_generic_events[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a, /* CMPLU_STALL */
- [PERF_COUNT_HW_INSTRUCTIONS] = 2,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/
- [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */
+ [PERF_COUNT_HW_CPU_CYCLES] = PME_PM_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PME_PM_GCT_NOSLOT_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PME_PM_CMPLU_STALL,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PME_PM_INST_CMPL,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BRU_MPRED,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
@@ -362,6 +374,70 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
+
+GENERIC_EVENT_ATTR(cpu-cycles, CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL);
+GENERIC_EVENT_ATTR(instructions, INST_CMPL);
+GENERIC_EVENT_ATTR(cache-references, LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1);
+GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN);
+GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED);
+
+POWER_EVENT_ATTR(CYC, CYC);
+POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC);
+POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL);
+POWER_EVENT_ATTR(INST_CMPL, INST_CMPL);
+POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1);
+POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1);
+POWER_EVENT_ATTR(BRU_FIN, BRU_FIN);
+POWER_EVENT_ATTR(BRU_MPRED, BRU_MPRED);
+
+static struct attribute *power7_events_attr[] = {
+ GENERIC_EVENT_PTR(CYC),
+ GENERIC_EVENT_PTR(GCT_NOSLOT_CYC),
+ GENERIC_EVENT_PTR(CMPLU_STALL),
+ GENERIC_EVENT_PTR(INST_CMPL),
+ GENERIC_EVENT_PTR(LD_REF_L1),
+ GENERIC_EVENT_PTR(LD_MISS_L1),
+ GENERIC_EVENT_PTR(BRU_FIN),
+ GENERIC_EVENT_PTR(BRU_MPRED),
+
+ POWER_EVENT_PTR(CYC),
+ POWER_EVENT_PTR(GCT_NOSLOT_CYC),
+ POWER_EVENT_PTR(CMPLU_STALL),
+ POWER_EVENT_PTR(INST_CMPL),
+ POWER_EVENT_PTR(LD_REF_L1),
+ POWER_EVENT_PTR(LD_MISS_L1),
+ POWER_EVENT_PTR(BRU_FIN),
+ POWER_EVENT_PTR(BRU_MPRED),
+ NULL
+};
+
+
+static struct attribute_group power7_pmu_events_group = {
+ .name = "events",
+ .attrs = power7_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-19");
+
+static struct attribute *power7_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group power7_pmu_format_group = {
+ .name = "format",
+ .attrs = power7_pmu_format_attr,
+};
+
+static const struct attribute_group *power7_pmu_attr_groups[] = {
+ &power7_pmu_format_group,
+ &power7_pmu_events_group,
+ NULL,
+};
+
static struct power_pmu power7_pmu = {
.name = "POWER7",
.n_counter = 6,
@@ -373,6 +449,7 @@ static struct power_pmu power7_pmu = {
.get_alternatives = power7_get_alternatives,
.disable_pmc = power7_disable_pmc,
.flags = PPMU_ALT_SIPR,
+ .attr_groups = power7_pmu_attr_groups,
.n_generic = ARRAY_SIZE(power7_generic_events),
.generic_events = power7_generic_events,
.cache_events = &power7_cache_events,
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
new file mode 100644
index 0000000..f7d1c4f
--- /dev/null
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -0,0 +1,592 @@
+/*
+ * Performance counter support for POWER8 processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2013 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <asm/firmware.h>
+
+
+/*
+ * Some power8 event codes.
+ */
+#define PM_CYC 0x0001e
+#define PM_GCT_NOSLOT_CYC 0x100f8
+#define PM_CMPLU_STALL 0x4000a
+#define PM_INST_CMPL 0x00002
+#define PM_BRU_FIN 0x10068
+#define PM_BR_MPRED_CMPL 0x400f6
+
+
+/*
+ * Raw event encoding for POWER8:
+ *
+ * 60 56 52 48 44 40 36 32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ thresh_cmp ] [ thresh_ctl ]
+ * |
+ * thresh start/stop OR FAB match -*
+ *
+ * 28 24 20 16 12 8 4 0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ]
+ * | | | | |
+ * | | | | *- mark
+ * | | *- L1/L2/L3 cache_sel |
+ * | | |
+ * | *- sampling mode for marked events *- combine
+ * |
+ * *- thresh_sel
+ *
+ * Below uses IBM bit numbering.
+ *
+ * MMCR1[x:y] = unit (PMCxUNIT)
+ * MMCR1[x] = combine (PMCxCOMB)
+ *
+ * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
+ * # PM_MRK_FAB_RSP_MATCH
+ * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
+ * # PM_MRK_FAB_RSP_MATCH_CYC
+ * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else
+ * MMCRA[48:55] = thresh_ctl (THRESH START/END)
+ *
+ * if thresh_sel:
+ * MMCRA[45:47] = thresh_sel
+ *
+ * if thresh_cmp:
+ * MMCRA[22:24] = thresh_cmp[0:2]
+ * MMCRA[25:31] = thresh_cmp[3:9]
+ *
+ * if unit == 6 or unit == 7
+ * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
+ * else if unit == 8 or unit == 9:
+ * if cache_sel[0] == 0: # L3 bank
+ * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
+ * else if cache_sel[0] == 1:
+ * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
+ * else if cache_sel[1]: # L1 event
+ * MMCR1[16] = cache_sel[2]
+ * MMCR1[17] = cache_sel[3]
+ *
+ * if mark:
+ * MMCRA[63] = 1 (SAMPLE_ENABLE)
+ * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
+ * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
+ *
+ */
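As a worked example of the layout above (field positions taken from the shift/mask definitions that follow), the PM_BR_MPRED_CMPL code decodes like this — illustrative only:

    /* PM_BR_MPRED_CMPL = 0x400f6 */
    unsigned int pmc     = (0x400f6 >> EVENT_PMC_SHIFT)  & EVENT_PMC_MASK;   /* 4    */
    unsigned int unit    = (0x400f6 >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;  /* 0    */
    unsigned int pmcxsel =  0x400f6 & EVENT_PSEL_MASK;                       /* 0xf6 */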
+
+#define EVENT_THR_CMP_SHIFT 40 /* Threshold CMP value */
+#define EVENT_THR_CMP_MASK 0x3ff
+#define EVENT_THR_CTL_SHIFT 32 /* Threshold control value (start/stop) */
+#define EVENT_THR_CTL_MASK 0xffull
+#define EVENT_THR_SEL_SHIFT 29 /* Threshold select value */
+#define EVENT_THR_SEL_MASK 0x7
+#define EVENT_THRESH_SHIFT 29 /* All threshold bits */
+#define EVENT_THRESH_MASK 0x1fffffull
+#define EVENT_SAMPLE_SHIFT 24 /* Sampling mode & eligibility */
+#define EVENT_SAMPLE_MASK 0x1f
+#define EVENT_CACHE_SEL_SHIFT 20 /* L2/L3 cache select */
+#define EVENT_CACHE_SEL_MASK 0xf
+#define EVENT_IS_L1 (4 << EVENT_CACHE_SEL_SHIFT)
+#define EVENT_PMC_SHIFT 16 /* PMC number (1-based) */
+#define EVENT_PMC_MASK 0xf
+#define EVENT_UNIT_SHIFT 12 /* Unit */
+#define EVENT_UNIT_MASK 0xf
+#define EVENT_COMBINE_SHIFT 11 /* Combine bit */
+#define EVENT_COMBINE_MASK 0x1
+#define EVENT_MARKED_SHIFT 8 /* Marked bit */
+#define EVENT_MARKED_MASK 0x1
+#define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
+#define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
+
+/* MMCRA IFM bits - POWER8 */
+#define POWER8_MMCRA_IFM1 0x0000000040000000UL
+#define POWER8_MMCRA_IFM2 0x0000000080000000UL
+#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
+
+#define ONLY_PLM \
+ (PERF_SAMPLE_BRANCH_USER |\
+ PERF_SAMPLE_BRANCH_KERNEL |\
+ PERF_SAMPLE_BRANCH_HV)
+
+/*
+ * Layout of constraint bits:
+ *
+ * 60 56 52 48 44 40 36 32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ]
+ * |
+ * thresh_sel -*
+ *
+ * 28 24 20 16 12 8 4 0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1]
+ * | |
+ * L1 I/D qualifier -* | Count of events for each PMC.
+ * | p1, p2, p3, p4, p5, p6.
+ * nc - number of counters -*
+ *
+ * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
+ * we want the low bit of each field to be added to any existing value.
+ *
+ * Everything else is a value field.
+ */
+
+#define CNST_FAB_MATCH_VAL(v) (((v) & EVENT_THR_CTL_MASK) << 56)
+#define CNST_FAB_MATCH_MASK CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)
+
+/* We just throw all the threshold bits into the constraint */
+#define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32)
+#define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK)
+
+#define CNST_L1_QUAL_VAL(v) (((v) & 3) << 22)
+#define CNST_L1_QUAL_MASK CNST_L1_QUAL_VAL(3)
+
+#define CNST_SAMPLE_VAL(v) (((v) & EVENT_SAMPLE_MASK) << 16)
+#define CNST_SAMPLE_MASK CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)
+
+/*
+ * For NC we are counting up to 4 events. This requires three bits, and we need
+ * the fifth event to overflow and set the 4th bit. To achieve that we bias the
+ * fields by 3 in test_adder.
+ */
+#define CNST_NC_SHIFT 12
+#define CNST_NC_VAL (1 << CNST_NC_SHIFT)
+#define CNST_NC_MASK (8 << CNST_NC_SHIFT)
+#define POWER8_TEST_ADDER (3 << CNST_NC_SHIFT)
+
+/*
+ * For the per-PMC fields we have two bits. The low bit is added, so if two
+ * events ask for the same PMC the sum will overflow, setting the high bit,
+ * indicating an error. So our mask sets the high bit.
+ */
+#define CNST_PMC_SHIFT(pmc) ((pmc - 1) * 2)
+#define CNST_PMC_VAL(pmc) (1 << CNST_PMC_SHIFT(pmc))
+#define CNST_PMC_MASK(pmc) (2 << CNST_PMC_SHIFT(pmc))
+
+/* Our add_fields is defined as: */
+#define POWER8_ADD_FIELDS \
+ CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
+ CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
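To see how the adder fields catch conflicts, consider two simple cases (a sketch of the scheme described above, not the full constraint-solver arithmetic in core-book3s.c):

    /* Two events both asking for PMC1: each adds CNST_PMC_VAL(1) = 0b01, so the
     * accumulated field becomes 0b10, which lands in CNST_PMC_MASK(1) -> conflict. */
    /* Five events needing a flexible PMC: the NC field reaches 5, and adding the
     * POWER8_TEST_ADDER bias of 3 gives 8, setting the CNST_NC_MASK bit -> rejected. */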
+
+
+/* Bits in MMCR1 for POWER8 */
+#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
+#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
+#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
+#define MMCR1_DC_QUAL_SHIFT 47
+#define MMCR1_IC_QUAL_SHIFT 46
+
+/* Bits in MMCRA for POWER8 */
+#define MMCRA_SAMP_MODE_SHIFT 1
+#define MMCRA_SAMP_ELIG_SHIFT 4
+#define MMCRA_THR_CTL_SHIFT 8
+#define MMCRA_THR_SEL_SHIFT 16
+#define MMCRA_THR_CMP_SHIFT 32
+#define MMCRA_SDAR_MODE_TLB (1ull << 42)
+
+
+static inline bool event_is_fab_match(u64 event)
+{
+ /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
+ event &= 0xff0fe;
+
+ /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
+ return (event == 0x30056 || event == 0x4f052);
+}
+
+static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
+{
+ unsigned int unit, pmc, cache;
+ unsigned long mask, value;
+
+ mask = value = 0;
+
+ pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
+ cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
+
+ if (pmc) {
+ if (pmc > 6)
+ return -1;
+
+ mask |= CNST_PMC_MASK(pmc);
+ value |= CNST_PMC_VAL(pmc);
+
+ if (pmc >= 5 && event != 0x500fa && event != 0x600f4)
+ return -1;
+ }
+
+ if (pmc <= 4) {
+ /*
+ * Add to number of counters in use. Note this includes events with
+ * a PMC of 0 - they still need a PMC, it's just assigned later.
+ * Don't count events on PMC 5 & 6, there is only one valid event
+ * on each of those counters, and they are handled above.
+ */
+ mask |= CNST_NC_MASK;
+ value |= CNST_NC_VAL;
+ }
+
+ if (unit >= 6 && unit <= 9) {
+ /*
+ * L2/L3 events contain a cache selector field, which is
+ * supposed to be programmed into MMCRC. However MMCRC is only
+ * HV writable, and there is no API for guest kernels to modify
+ * it. The solution is for the hypervisor to initialise the
+ * field to zeroes, and for us to only ever allow events that
+ * have a cache selector of zero.
+ */
+ if (cache)
+ return -1;
+
+ } else if (event & EVENT_IS_L1) {
+ mask |= CNST_L1_QUAL_MASK;
+ value |= CNST_L1_QUAL_VAL(cache);
+ }
+
+ if (event & EVENT_IS_MARKED) {
+ mask |= CNST_SAMPLE_MASK;
+ value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
+ }
+
+ /*
+ * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
+ * the threshold control bits are used for the match value.
+ */
+ if (event_is_fab_match(event)) {
+ mask |= CNST_FAB_MATCH_MASK;
+ value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
+ } else {
+ /*
+ * Check the mantissa upper two bits are not zero, unless the
+ * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
+ */
+ unsigned int cmp, exp;
+
+ cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
+ exp = cmp >> 7;
+
+ if (exp && (cmp & 0x60) == 0)
+ return -1;
+
+ mask |= CNST_THRESH_MASK;
+ value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
+ }
+
+ *maskp = mask;
+ *valp = value;
+
+ return 0;
+}
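Tracing the function for a simple event helps: PM_CYC (0x1e) encodes no fixed PMC, no unit, and no threshold or marking bits, so only the counter-count and (all-zero) threshold fields end up constrained. Expected outcome, shown as a sketch:

    unsigned long mask, value;
    power8_get_constraint(PM_CYC, &mask, &value);
    /* mask  == CNST_NC_MASK | CNST_THRESH_MASK */
    /* value == CNST_NC_VAL   (threshold value bits are all zero) */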
+
+static int power8_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], unsigned long mmcr[])
+{
+ unsigned long mmcra, mmcr1, unit, combine, psel, cache, val;
+ unsigned int pmc, pmc_inuse;
+ int i;
+
+ pmc_inuse = 0;
+
+ /* First pass to count resource use */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ if (pmc)
+ pmc_inuse |= 1 << pmc;
+ }
+
+ /* In continuous sampling mode, update SDAR on TLB miss */
+ mmcra = MMCRA_SDAR_MODE_TLB;
+ mmcr1 = 0;
+
+ /* Second pass: assign PMCs, set all MMCR1 fields */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
+ combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
+ psel = event[i] & EVENT_PSEL_MASK;
+
+ if (!pmc) {
+ for (pmc = 1; pmc <= 4; ++pmc) {
+ if (!(pmc_inuse & (1 << pmc)))
+ break;
+ }
+
+ pmc_inuse |= 1 << pmc;
+ }
+
+ if (pmc <= 4) {
+ mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
+ mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
+ mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
+ }
+
+ if (event[i] & EVENT_IS_L1) {
+ cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
+ mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
+ cache >>= 1;
+ mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
+ }
+
+ if (event[i] & EVENT_IS_MARKED) {
+ mmcra |= MMCRA_SAMPLE_ENABLE;
+
+ val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
+ if (val) {
+ mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
+ mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
+ }
+ }
+
+ /*
+ * For PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
+ * the threshold bits are used for the match value.
+ */
+ if (event_is_fab_match(event[i])) {
+ mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
+ EVENT_THR_CTL_MASK;
+ } else {
+ val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
+ mmcra |= val << MMCRA_THR_CTL_SHIFT;
+ val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
+ mmcra |= val << MMCRA_THR_SEL_SHIFT;
+ val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
+ mmcra |= val << MMCRA_THR_CMP_SHIFT;
+ }
+
+ hwc[i] = pmc - 1;
+ }
+
+ /* Return MMCRx values */
+ mmcr[0] = 0;
+
+ /* pmc_inuse is 1-based */
+ if (pmc_inuse & 2)
+ mmcr[0] = MMCR0_PMC1CE;
+
+ if (pmc_inuse & 0x7c)
+ mmcr[0] |= MMCR0_PMCjCE;
+
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcra;
+
+ return 0;
+}
+
+#define MAX_ALT 2
+
+/* Table of alternatives, sorted by column 0 */
+static const unsigned int event_alternatives[][MAX_ALT] = {
+ { 0x10134, 0x301e2 }, /* PM_MRK_ST_CMPL */
+ { 0x10138, 0x40138 }, /* PM_BR_MRK_2PATH */
+ { 0x18082, 0x3e05e }, /* PM_L3_CO_MEPF */
+ { 0x1d14e, 0x401e8 }, /* PM_MRK_DATA_FROM_L2MISS */
+ { 0x1e054, 0x4000a }, /* PM_CMPLU_STALL */
+ { 0x20036, 0x40036 }, /* PM_BR_2PATH */
+ { 0x200f2, 0x300f2 }, /* PM_INST_DISP */
+ { 0x200f4, 0x600f4 }, /* PM_RUN_CYC */
+ { 0x2013c, 0x3012e }, /* PM_MRK_FILT_MATCH */
+ { 0x3e054, 0x400f0 }, /* PM_LD_MISS_L1 */
+ { 0x400fa, 0x500fa }, /* PM_RUN_INST_CMPL */
+};
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(u64 event)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+ if (event < event_alternatives[i][0])
+ break;
+
+ for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+ if (event == event_alternatives[i][j])
+ return i;
+ }
+
+ return -1;
+}
+
+static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int i, j, num_alt = 0;
+ u64 alt_event;
+
+ alt[num_alt++] = event;
+
+ i = find_alternative(event);
+ if (i >= 0) {
+ /* Filter out the original event, it's already in alt[0] */
+ for (j = 0; j < MAX_ALT; ++j) {
+ alt_event = event_alternatives[i][j];
+ if (alt_event && alt_event != event)
+ alt[num_alt++] = alt_event;
+ }
+ }
+
+ if (flags & PPMU_ONLY_COUNT_RUN) {
+ /*
+ * We're only counting in RUN state, so PM_CYC is equivalent to
+ * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
+ */
+ j = num_alt;
+ for (i = 0; i < num_alt; ++i) {
+ switch (alt[i]) {
+ case 0x1e: /* PM_CYC */
+ alt[j++] = 0x600f4; /* PM_RUN_CYC */
+ break;
+ case 0x600f4: /* PM_RUN_CYC */
+ alt[j++] = 0x1e;
+ break;
+ case 0x2: /* PM_PPC_CMPL */
+ alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */
+ break;
+ case 0x500fa: /* PM_RUN_INST_CMPL */
+ alt[j++] = 0x2; /* PM_PPC_CMPL */
+ break;
+ }
+ }
+ num_alt = j;
+ }
+
+ return num_alt;
+}
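A couple of traces through the code above (illustrative only): PM_CYC (0x1e) has no row in event_alternatives, but with PPMU_ONLY_COUNT_RUN it still gains PM_RUN_CYC; PM_RUN_CYC (0x200f4) picks up both its table alternative and PM_CYC:

    u64 alt[MAX_ALT + 1];
    power8_get_alternatives(0x1e, PPMU_ONLY_COUNT_RUN, alt);
    /* alt = { 0x1e, 0x600f4 },            returns 2 */
    power8_get_alternatives(0x200f4, PPMU_ONLY_COUNT_RUN, alt);
    /* alt = { 0x200f4, 0x600f4, 0x1e },   returns 3 */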
+
+static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+{
+ if (pmc <= 3)
+ mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
+}
+
+PMU_FORMAT_ATTR(event, "config:0-49");
+PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
+PMU_FORMAT_ATTR(mark, "config:8");
+PMU_FORMAT_ATTR(combine, "config:11");
+PMU_FORMAT_ATTR(unit, "config:12-15");
+PMU_FORMAT_ATTR(pmc, "config:16-19");
+PMU_FORMAT_ATTR(cache_sel, "config:20-23");
+PMU_FORMAT_ATTR(sample_mode, "config:24-28");
+PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
+PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
+PMU_FORMAT_ATTR(thresh_start, "config:36-39");
+PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
+
+static struct attribute *power8_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_pmcxsel.attr,
+ &format_attr_mark.attr,
+ &format_attr_combine.attr,
+ &format_attr_unit.attr,
+ &format_attr_pmc.attr,
+ &format_attr_cache_sel.attr,
+ &format_attr_sample_mode.attr,
+ &format_attr_thresh_sel.attr,
+ &format_attr_thresh_stop.attr,
+ &format_attr_thresh_start.attr,
+ &format_attr_thresh_cmp.attr,
+ NULL,
+};
+
+static struct attribute_group power8_pmu_format_group = {
+ .name = "format",
+ .attrs = power8_pmu_format_attr,
+};
+
+static const struct attribute_group *power8_pmu_attr_groups[] = {
+ &power8_pmu_format_group,
+ NULL,
+};
+
+static int power8_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_GCT_NOSLOT_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+};
+
+static u64 power8_bhrb_filter_map(u64 branch_sample_type)
+{
+ u64 pmu_bhrb_filter = 0;
+ u64 br_privilege = branch_sample_type & ONLY_PLM;
+
+ /* BHRB and regular PMU events share the same privilege state
+ * filter configuration. BHRB is always recorded along with a
+ * regular PMU event, so the privilege state filter criteria for BHRB
+ * and the companion PMU event have to be the same. By default the
+ * "perf record" tool sets all privilege bits ON when no filter
+ * criteria is provided on the command line. So as long as all
+ * privilege bits are ON, or they are all OFF, we are good to go.
+ */
+ if ((br_privilege != 7) && (br_privilege != 0))
+ return -1;
+
+ /* No branch filter requested */
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
+ return pmu_bhrb_filter;
+
+ /* Invalid branch filter options - HW does not support */
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
+ pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
+ return pmu_bhrb_filter;
+ }
+
+ /* Everything else is unsupported */
+ return -1;
+}
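Two illustrative inputs, traced from the checks above (not an exhaustive list): the branch privilege bits must be all set or all clear, and only the "any" (no filtering) and "any call" filters are accepted:

    /* all privilege bits set + any-call filter -> POWER8_MMCRA_IFM1 */
    u64 ok  = power8_bhrb_filter_map(PERF_SAMPLE_BRANCH_ANY_CALL | PERF_SAMPLE_BRANCH_USER |
                                     PERF_SAMPLE_BRANCH_KERNEL  | PERF_SAMPLE_BRANCH_HV);
    /* user-only privilege filtering -> (u64)-1, i.e. unsupported */
    u64 bad = power8_bhrb_filter_map(PERF_SAMPLE_BRANCH_ANY_CALL | PERF_SAMPLE_BRANCH_USER);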
+
+static void power8_config_bhrb(u64 pmu_bhrb_filter)
+{
+ /* Enable BHRB filter in PMU */
+ mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
+}
+
+static struct power_pmu power8_pmu = {
+ .name = "POWER8",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT + 1,
+ .add_fields = POWER8_ADD_FIELDS,
+ .test_adder = POWER8_TEST_ADDER,
+ .compute_mmcr = power8_compute_mmcr,
+ .config_bhrb = power8_config_bhrb,
+ .bhrb_filter_map = power8_bhrb_filter_map,
+ .get_constraint = power8_get_constraint,
+ .get_alternatives = power8_get_alternatives,
+ .disable_pmc = power8_disable_pmc,
+ .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB,
+ .n_generic = ARRAY_SIZE(power8_generic_events),
+ .generic_events = power8_generic_events,
+ .attr_groups = power8_pmu_attr_groups,
+ .bhrb_nr = 32,
+};
+
+static int __init init_power8_pmu(void)
+{
+ if (!cur_cpu_spec->oprofile_cpu_type ||
+ strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
+ return -ENODEV;
+
+ return register_power_pmu(&power8_pmu);
+}
+early_initcall(init_power8_pmu);
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index a392d12..6e287f1 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -20,7 +20,6 @@ config HOTFOOT
bool "Hotfoot"
depends on 40x
default n
- select 405EP
select PPC40x_SIMPLE
select PCI
help
@@ -105,9 +104,6 @@ config 405GP
select IBM405_ERR51
select IBM_EMAC_ZMII
-config 405EP
- bool
-
config 405EX
bool
select IBM_EMAC_EMAC4
@@ -119,9 +115,6 @@ config 405EZ
select IBM_EMAC_MAL_CLR_ICINTSTAT
select IBM_EMAC_MAL_COMMON_ERR
-config 405GPR
- bool
-
config XILINX_VIRTEX
bool
select DEFAULT_UIMAGE
@@ -145,7 +138,6 @@ config PPC4xx_GPIO
bool "PPC4xx GPIO support"
depends on 40x
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
help
Enable gpiolib support for ppc40x based boards
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 8abf6fb8..d6c7506 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -248,10 +248,17 @@ config PPC4xx_GPIO
bool "PPC4xx GPIO support"
depends on 44x
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
help
Enable gpiolib support for ppc440 based boards
+config PPC4xx_OCM
+ bool "PPC4xx On Chip Memory (OCM) support"
+ depends on 4xx
+ select PPC_LIB_RHEAP
+ help
+ Enable OCM support for PowerPC 4xx platforms with on-chip memory.
+ OCM provides fast on-chip memory that can be used to improve performance.
+
# 44x specific CPU modules, selected based on the board above.
config 440EP
bool
@@ -266,6 +273,8 @@ config 440EPX
select IBM_EMAC_EMAC4
select IBM_EMAC_RGMII
select IBM_EMAC_ZMII
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_DESC
config 440GRX
bool
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index c169998..fc9c1cb 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -7,6 +7,8 @@ config PPC_MPC512x
select PPC_PCI_CHOICE
select FSL_PCI if PCI
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_DESC
config MPC5121_ADS
bool "Freescale MPC5121E ADS"
@@ -15,16 +17,16 @@ config MPC5121_ADS
help
This option enables support for the MPC5121E ADS board.
-config MPC5121_GENERIC
- bool "Generic support for simple MPC5121 based boards"
+config MPC512x_GENERIC
+ bool "Generic support for simple MPC512x based boards"
depends on PPC_MPC512x
select DEFAULT_UIMAGE
help
- This option enables support for simple MPC5121 based boards
+ This option enables support for simple MPC512x based boards
which do not need custom platform specific setup.
Compatible boards include: Protonic LVT base boards (ZANMCU
- and VICVT2).
+ and VICVT2), Freescale MPC5125 Tower system.
config PDM360NG
bool "ifm PDM360NG board"
diff --git a/arch/powerpc/platforms/512x/Makefile b/arch/powerpc/platforms/512x/Makefile
index 4efc1c4..72fb934 100644
--- a/arch/powerpc/platforms/512x/Makefile
+++ b/arch/powerpc/platforms/512x/Makefile
@@ -3,5 +3,5 @@
#
obj-y += clock.o mpc512x_shared.o
obj-$(CONFIG_MPC5121_ADS) += mpc5121_ads.o mpc5121_ads_cpld.o
-obj-$(CONFIG_MPC5121_GENERIC) += mpc5121_generic.o
+obj-$(CONFIG_MPC512x_GENERIC) += mpc512x_generic.o
obj-$(CONFIG_PDM360NG) += pdm360ng.o
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index 9f771e0..e504166 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
@@ -26,8 +26,11 @@
#include <linux/of_platform.h>
#include <asm/mpc5xxx.h>
+#include <asm/mpc5121.h>
#include <asm/clk_interface.h>
+#include "mpc512x.h"
+
#undef CLK_DEBUG
static int clocks_initialized;
@@ -122,7 +125,7 @@ struct mpc512x_clockctl {
u32 dccr; /* DIU Clk Cnfg Reg */
};
-struct mpc512x_clockctl __iomem *clockctl;
+static struct mpc512x_clockctl __iomem *clockctl;
static int mpc5121_clk_enable(struct clk *clk)
{
@@ -184,7 +187,7 @@ static unsigned long spmf_mult(void)
36, 40, 44, 48,
52, 56, 60, 64
};
- int spmf = (clockctl->spmr >> 24) & 0xf;
+ int spmf = (in_be32(&clockctl->spmr) >> 24) & 0xf;
return spmf_to_mult[spmf];
}
@@ -206,7 +209,7 @@ static unsigned long sysdiv_div_x_2(void)
52, 56, 58, 62,
60, 64, 66,
};
- int sysdiv = (clockctl->scfr2 >> 26) & 0x3f;
+ int sysdiv = (in_be32(&clockctl->scfr2) >> 26) & 0x3f;
return sysdiv_to_div_x_2[sysdiv];
}
@@ -230,7 +233,7 @@ static unsigned long sys_to_ref(unsigned long rate)
static long ips_to_ref(unsigned long rate)
{
- int ips_div = (clockctl->scfr1 >> 23) & 0x7;
+ int ips_div = (in_be32(&clockctl->scfr1) >> 23) & 0x7;
rate *= ips_div; /* csb_clk = ips_clk * ips_div */
rate *= 2; /* sys_clk = csb_clk * 2 */
@@ -284,7 +287,7 @@ static struct clk sys_clk = {
static void diu_clk_calc(struct clk *clk)
{
- int diudiv_x_2 = clockctl->scfr1 & 0xff;
+ int diudiv_x_2 = in_be32(&clockctl->scfr1) & 0xff;
unsigned long rate;
rate = sys_clk.rate;
@@ -311,7 +314,7 @@ static void half_clk_calc(struct clk *clk)
static void generic_div_clk_calc(struct clk *clk)
{
- int div = (clockctl->scfr1 >> clk->div_shift) & 0x7;
+ int div = (in_be32(&clockctl->scfr1) >> clk->div_shift) & 0x7;
clk->rate = clk->parent->rate / div;
}
@@ -329,7 +332,7 @@ static struct clk csb_clk = {
static void e300_clk_calc(struct clk *clk)
{
- int spmf = (clockctl->spmr >> 16) & 0xf;
+ int spmf = (in_be32(&clockctl->spmr) >> 16) & 0xf;
int ratex2 = clk->parent->rate * spmf;
clk->rate = ratex2 / 2;
@@ -551,7 +554,7 @@ static struct clk ac97_clk = {
.calc = ac97_clk_calc,
};
-struct clk *rate_clks[] = {
+static struct clk *rate_clks[] = {
&ref_clk,
&sys_clk,
&diu_clk,
@@ -607,7 +610,7 @@ static void rate_clks_init(void)
* There are two clk enable registers with 32 enable bits each
* psc clocks and device clocks are all stored in dev_clks
*/
-struct clk dev_clks[2][32];
+static struct clk dev_clks[2][32];
/*
* Given a psc number return the dev_clk
@@ -648,12 +651,12 @@ static void psc_calc_rate(struct clk *clk, int pscnum, struct device_node *np)
out_be32(&clockctl->pccr[pscnum], 0x00020000);
out_be32(&clockctl->pccr[pscnum], 0x00030000);
- if (clockctl->pccr[pscnum] & 0x80) {
+ if (in_be32(&clockctl->pccr[pscnum]) & 0x80) {
clk->rate = spdif_rxclk.rate;
return;
}
- switch ((clockctl->pccr[pscnum] >> 14) & 0x3) {
+ switch ((in_be32(&clockctl->pccr[pscnum]) >> 14) & 0x3) {
case 0:
mclk_src = sys_clk.rate;
break;
@@ -668,7 +671,7 @@ static void psc_calc_rate(struct clk *clk, int pscnum, struct device_node *np)
break;
}
- mclk_div = ((clockctl->pccr[pscnum] >> 17) & 0x7fff) + 1;
+ mclk_div = ((in_be32(&clockctl->pccr[pscnum]) >> 17) & 0x7fff) + 1;
clk->rate = mclk_src / mclk_div;
}
@@ -680,13 +683,17 @@ static void psc_calc_rate(struct clk *clk, int pscnum, struct device_node *np)
static void psc_clks_init(void)
{
struct device_node *np;
- const u32 *cell_index;
struct platform_device *ofdev;
+ u32 reg;
+ const char *psc_compat;
+
+ psc_compat = mpc512x_select_psc_compat();
+ if (!psc_compat)
+ return;
- for_each_compatible_node(np, NULL, "fsl,mpc5121-psc") {
- cell_index = of_get_property(np, "cell-index", NULL);
- if (cell_index) {
- int pscnum = *cell_index;
+ for_each_compatible_node(np, NULL, psc_compat) {
+ if (!of_property_read_u32(np, "reg", &reg)) {
+ int pscnum = (reg & 0xf00) >> 8;
struct clk *clk = psc_dev_clk(pscnum);
clk->flags = CLK_HAS_RATE | CLK_HAS_CTRL;
@@ -696,7 +703,7 @@ static void psc_clks_init(void)
* AC97 is special rate clock does
* not go through normal path
*/
- if (strcmp("ac97", np->name) == 0)
+ if (of_device_is_compatible(np, "fsl,mpc5121-psc-ac97"))
clk->rate = ac97_clk.rate;
else
psc_calc_rate(clk, pscnum, np);
diff --git a/arch/powerpc/platforms/512x/mpc512x.h b/arch/powerpc/platforms/512x/mpc512x.h
index c32b399..0a8e600 100644
--- a/arch/powerpc/platforms/512x/mpc512x.h
+++ b/arch/powerpc/platforms/512x/mpc512x.h
@@ -15,6 +15,7 @@ extern void __init mpc512x_init_IRQ(void);
extern void __init mpc512x_init(void);
extern int __init mpc5121_clk_init(void);
void __init mpc512x_declare_of_platform_devices(void);
+extern const char *mpc512x_select_psc_compat(void);
extern void mpc512x_restart(char *cmd);
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
diff --git a/arch/powerpc/platforms/512x/mpc5121_generic.c b/arch/powerpc/platforms/512x/mpc512x_generic.c
index ca1ca66..5fb919b 100644
--- a/arch/powerpc/platforms/512x/mpc5121_generic.c
+++ b/arch/powerpc/platforms/512x/mpc512x_generic.c
@@ -4,7 +4,7 @@
* Author: John Rigby, <jrigby@freescale.com>
*
* Description:
- * MPC5121 SoC setup
+ * MPC512x SoC setup
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
@@ -28,20 +28,22 @@
*/
static const char * const board[] __initconst = {
"prt,prtlvt",
+ "fsl,mpc5125ads",
+ "ifm,ac14xx",
NULL
};
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
-static int __init mpc5121_generic_probe(void)
+static int __init mpc512x_generic_probe(void)
{
return of_flat_dt_match(of_get_flat_dt_root(), board);
}
-define_machine(mpc5121_generic) {
- .name = "MPC5121 generic",
- .probe = mpc5121_generic_probe,
+define_machine(mpc512x_generic) {
+ .name = "MPC512x generic",
+ .probe = mpc512x_generic_probe,
.init = mpc512x_init,
.init_early = mpc512x_init_diu,
.setup_arch = mpc512x_setup_diu,
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index c7f47cf..6eb94ab 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -172,12 +172,9 @@ static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb;
static inline void mpc512x_free_bootmem(struct page *page)
{
- __ClearPageReserved(page);
BUG_ON(PageTail(page));
BUG_ON(atomic_read(&page->_count) > 1);
- atomic_set(&page->_count, 1);
- __free_page(page);
- totalram_pages++;
+ free_reserved_page(page);
}
void mpc512x_release_bootmem(void)
@@ -330,26 +327,34 @@ void __init mpc512x_init_IRQ(void)
static struct of_device_id __initdata of_bus_ids[] = {
{ .compatible = "fsl,mpc5121-immr", },
{ .compatible = "fsl,mpc5121-localbus", },
+ { .compatible = "fsl,mpc5121-mbx", },
+ { .compatible = "fsl,mpc5121-nfc", },
+ { .compatible = "fsl,mpc5121-sram", },
+ { .compatible = "fsl,mpc5121-pci", },
+ { .compatible = "gpio-leds", },
{},
};
void __init mpc512x_declare_of_platform_devices(void)
{
- struct device_node *np;
-
if (of_platform_bus_probe(NULL, of_bus_ids, NULL))
printk(KERN_ERR __FILE__ ": "
"Error while probing of_platform bus\n");
-
- np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-nfc");
- if (np) {
- of_platform_device_create(np, NULL, NULL);
- of_node_put(np);
- }
}
#define DEFAULT_FIFO_SIZE 16
+const char *mpc512x_select_psc_compat(void)
+{
+ if (of_machine_is_compatible("fsl,mpc5121"))
+ return "fsl,mpc5121-psc";
+
+ if (of_machine_is_compatible("fsl,mpc5125"))
+ return "fsl,mpc5125-psc";
+
+ return NULL;
+}
+
static unsigned int __init get_fifo_size(struct device_node *np,
char *prop_name)
{
@@ -375,9 +380,16 @@ void __init mpc512x_psc_fifo_init(void)
void __iomem *psc;
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
+ const char *psc_compat;
int fifobase = 0; /* current fifo address in 32 bit words */
- for_each_compatible_node(np, NULL, "fsl,mpc5121-psc") {
+ psc_compat = mpc512x_select_psc_compat();
+ if (!psc_compat) {
+ pr_err("%s: no compatible devices found\n", __func__);
+ return;
+ }
+
+ for_each_compatible_node(np, NULL, psc_compat) {
tx_fifo_size = get_fifo_size(np, "fsl,tx-fifo-size");
rx_fifo_size = get_fifo_size(np, "fsl,rx-fifo-size");
@@ -426,8 +438,38 @@ void __init mpc512x_psc_fifo_init(void)
void __init mpc512x_init(void)
{
- mpc512x_declare_of_platform_devices();
mpc5121_clk_init();
+ mpc512x_declare_of_platform_devices();
mpc512x_restart_init();
mpc512x_psc_fifo_init();
}
+
+/**
+ * mpc512x_cs_config - Setup chip select configuration
+ * @cs: chip select number
+ * @val: chip select configuration value
+ *
+ * Perform chip select configuration for devices on LocalPlus Bus.
+ * Intended to dynamically reconfigure the chip select parameters
+ * for configurable devices on the bus.
+ */
+int mpc512x_cs_config(unsigned int cs, u32 val)
+{
+ static struct mpc512x_lpc __iomem *lpc;
+ struct device_node *np;
+
+ if (cs > 7)
+ return -EINVAL;
+
+ if (!lpc) {
+ np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-lpc");
+ lpc = of_iomap(np, 0);
+ of_node_put(np);
+ if (!lpc)
+ return -ENOMEM;
+ }
+
+ out_be32(&lpc->cs_cfg[cs], val);
+ return 0;
+}
+EXPORT_SYMBOL(mpc512x_cs_config);
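A hypothetical caller could look like the sketch below; the configuration value is a placeholder for illustration only, not a documented setting for any particular board:

    /* illustrative only: reprogram LocalPlus Bus chip select 2 */
    u32 cs_cfg = 0x00c50000;    /* placeholder timing/config value */
    if (mpc512x_cs_config(2, cs_cfg))
        pr_warn("LPB CS2 reconfiguration failed\n");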
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index f9f4537..be7b1aa 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -20,9 +20,9 @@
#include <asm/mpc52xx.h>
#include <asm/time.h>
-#include <sysdev/bestcomm/bestcomm.h>
-#include <sysdev/bestcomm/bestcomm_priv.h>
-#include <sysdev/bestcomm/gen_bd.h>
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/gen_bd.h>
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver");
diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c
index cf964e1..058cc18 100644
--- a/arch/powerpc/platforms/82xx/km82xx.c
+++ b/arch/powerpc/platforms/82xx/km82xx.c
@@ -18,11 +18,11 @@
#include <linux/fsl_devices.h>
#include <linux/of_platform.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/cpm2.h>
#include <asm/udbg.h>
#include <asm/machdep.h>
-#include <asm/time.h>
+#include <linux/time.h>
#include <asm/mpc8260.h>
#include <asm/prom.h>
@@ -36,7 +36,7 @@ static void __init km82xx_pic_init(void)
struct device_node *np = of_find_compatible_node(NULL, NULL,
"fsl,pq2-pic");
if (!np) {
- printk(KERN_ERR "PIC init: can not find cpm-pic node\n");
+ pr_err("PIC init: can not find cpm-pic node\n");
return;
}
diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c
index fb94d10..fc8b2d6 100644
--- a/arch/powerpc/platforms/82xx/pq2.c
+++ b/arch/powerpc/platforms/82xx/pq2.c
@@ -71,11 +71,11 @@ err:
void __init pq2_init_pci(void)
{
- struct device_node *np = NULL;
+ struct device_node *np;
ppc_md.pci_exclude_device = pq2_pci_exclude_device;
- while ((np = of_find_compatible_node(np, NULL, "fsl,pq2-pci")))
+ for_each_compatible_node(np, NULL, "fsl,pq2-pci")
pq2_pci_add_bridge(np);
}
#endif
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index 89923d7..bf4c447 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -28,8 +28,8 @@
#include <linux/of_device.h>
#include <linux/atomic.h>
-#include <asm/time.h>
-#include <asm/io.h>
+#include <linux/time.h>
+#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
@@ -43,6 +43,82 @@
#include "mpc83xx.h"
#define SVR_REV(svr) (((svr) >> 0) & 0xFFFF) /* Revision field */
+
+static void quirk_mpc8360e_qe_enet10(void)
+{
+ /*
+ * handle mpc8360E Erratum QE_ENET10:
+ * RGMII AC values do not meet the specification
+ */
+ uint svid = mfspr(SPRN_SVR);
+ struct device_node *np_par;
+ struct resource res;
+ void __iomem *base;
+ int ret;
+
+ np_par = of_find_node_by_name(NULL, "par_io");
+ if (np_par == NULL) {
+ pr_warn("%s couldn;t find par_io node\n", __func__);
+ return;
+ }
+ /* Map Parallel I/O ports registers */
+ ret = of_address_to_resource(np_par, 0, &res);
+ if (ret) {
+ pr_warn("%s couldn;t map par_io registers\n", __func__);
+ return;
+ }
+
+ base = ioremap(res.start, res.end - res.start + 1);
+
+ /*
+ * set output delay adjustments to default values according to
+ * table 5 in Errata Rev. 5, 9/2011:
+ *
+ * write 0b01 to UCC1 bits 18:19
+ * write 0b01 to UCC2 option 1 bits 4:5
+ * write 0b01 to UCC2 option 2 bits 16:17
+ */
+ clrsetbits_be32((base + 0xa8), 0x0c00f000, 0x04005000);
+
+ /*
+ * set output delay adjustments to default values according to
+ * table 3-13 in Reference Manual Rev.3 05/2010:
+ *
+ * write 0b01 to UCC2 option 2 bits 16:17
+ * write 0b0101 to UCC1 bits 20:23
+ * write 0b0101 to UCC2 option 1 bits 24:27
+ */
+ clrsetbits_be32((base + 0xac), 0x0000cff0, 0x00004550);
+
+ if (SVR_REV(svid) == 0x0021) {
+ /*
+ * UCC2 option 1: write 0b1010 to bits 24:27
+ * at address IMMRBAR+0x14AC
+ */
+ clrsetbits_be32((base + 0xac), 0x000000f0, 0x000000a0);
+ } else if (SVR_REV(svid) == 0x0020) {
+ /*
+ * UCC1: write 0b11 to bits 18:19
+ * at address IMMRBAR+0x14A8
+ */
+ setbits32((base + 0xa8), 0x00003000);
+
+ /*
+ * UCC2 option 1: write 0b11 to bits 4:5
+ * at address IMMRBAR+0x14A8
+ */
+ setbits32((base + 0xa8), 0x0c000000);
+
+ /*
+ * UCC2 option 2: write 0b11 to bits 16:17
+ * at address IMMRBAR+0x14AC
+ */
+ setbits32((base + 0xac), 0x0000c000);
+ }
+ iounmap(base);
+ of_node_put(np_par);
+}
+
/* ************************************************************************
*
* Setup the architecture
@@ -72,84 +148,13 @@ static void __init mpc83xx_km_setup_arch(void)
for_each_node_by_name(np, "ucc")
par_io_of_config(np);
- }
-
- np = of_find_compatible_node(NULL, "network", "ucc_geth");
- if (np != NULL) {
- /*
- * handle mpc8360E Erratum QE_ENET10:
- * RGMII AC values do not meet the specification
- */
- uint svid = mfspr(SPRN_SVR);
- struct device_node *np_par;
- struct resource res;
- void __iomem *base;
- int ret;
-
- np_par = of_find_node_by_name(NULL, "par_io");
- if (np_par == NULL) {
- printk(KERN_WARNING "%s couldn;t find par_io node\n",
- __func__);
- return;
- }
- /* Map Parallel I/O ports registers */
- ret = of_address_to_resource(np_par, 0, &res);
- if (ret) {
- printk(KERN_WARNING "%s couldn;t map par_io registers\n",
- __func__);
- return;
- }
-
- base = ioremap(res.start, res.end - res.start + 1);
-
- /*
- * set output delay adjustments to default values according
- * table 5 in Errata Rev. 5, 9/2011:
- *
- * write 0b01 to UCC1 bits 18:19
- * write 0b01 to UCC2 option 1 bits 4:5
- * write 0b01 to UCC2 option 2 bits 16:17
- */
- clrsetbits_be32((base + 0xa8), 0x0c00f000, 0x04005000);
- /*
- * set output delay adjustments to default values according
- * table 3-13 in Reference Manual Rev.3 05/2010:
- *
- * write 0b01 to UCC2 option 2 bits 16:17
- * write 0b0101 to UCC1 bits 20:23
- * write 0b0101 to UCC2 option 1 bits 24:27
- */
- clrsetbits_be32((base + 0xac), 0x0000cff0, 0x00004550);
-
- if (SVR_REV(svid) == 0x0021) {
- /*
- * UCC2 option 1: write 0b1010 to bits 24:27
- * at address IMMRBAR+0x14AC
- */
- clrsetbits_be32((base + 0xac), 0x000000f0, 0x000000a0);
- } else if (SVR_REV(svid) == 0x0020) {
- /*
- * UCC1: write 0b11 to bits 18:19
- * at address IMMRBAR+0x14A8
- */
- setbits32((base + 0xa8), 0x00003000);
-
- /*
- * UCC2 option 1: write 0b11 to bits 4:5
- * at address IMMRBAR+0x14A8
- */
- setbits32((base + 0xa8), 0x0c000000);
-
- /*
- * UCC2 option 2: write 0b11 to bits 16:17
- * at address IMMRBAR+0x14AC
- */
- setbits32((base + 0xac), 0x0000c000);
+ /* Only apply this quirk when par_io is available */
+ np = of_find_compatible_node(NULL, "network", "ucc_geth");
+ if (np != NULL) {
+ quirk_mpc8360e_qe_enet10();
+ of_node_put(np);
}
- iounmap(base);
- of_node_put(np_par);
- of_node_put(np);
}
#endif /* CONFIG_QUICC_ENGINE */
}
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 48a56fd..60afff1 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -217,12 +217,18 @@ config SBC8548
help
This option enables support for the Wind River SBC8548 board
+config PPA8548
+ bool "Prodrive PPA8548"
+ help
+ This option enables support for the Prodrive PPA8548 board.
+ select DEFAULT_UIMAGE
+ select HAS_RAPIDIO
+
config GE_IMP3A
bool "GE Intelligent Platforms IMP3A"
select DEFAULT_UIMAGE
select SWIOTLB
select MMIO_NVRAM
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select GE_FPGA
help
@@ -278,6 +284,14 @@ config P4080_DS
help
This option enables support for the P4080 DS board
+config SGY_CTS1000
+ tristate "Servergy CTS-1000 support"
+ select GPIOLIB
+ select OF_GPIO
+ depends on P4080_DS
+ help
+ Enable this to support functionality in Servergy's CTS-1000 systems.
+
endif # PPC32
config P5020_DS
@@ -355,7 +369,7 @@ config B4_QDS
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
- select GENERIC_GPIO
+ select GPIOLIB
select ARCH_REQUIRE_GPIOLIB
select HAS_RAPIDIO
select PPC_EPAPR_HV_PIC
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index e018200..05ed088 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -34,8 +34,10 @@ obj-$(CONFIG_B4_QDS) += b4_qds.o corenet_ds.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
obj-$(CONFIG_TQM85xx) += tqm85xx.o
obj-$(CONFIG_SBC8548) += sbc8548.o
+obj-$(CONFIG_PPA8548) += ppa8548.o
obj-$(CONFIG_SOCRATES) += socrates.o socrates_fpga_pic.o
obj-$(CONFIG_KSI8560) += ksi8560.o
obj-$(CONFIG_XES_MPC85xx) += xes_mpc85xx.o
obj-$(CONFIG_GE_IMP3A) += ge_imp3a.o
obj-$(CONFIG_PPC_QEMU_E500) += qemu_e500.o
+obj-$(CONFIG_SGY_CTS1000) += sgy_cts1000.o
diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c
new file mode 100644
index 0000000..6a7704b
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/ppa8548.c
@@ -0,0 +1,98 @@
+/*
+ * ppa8548 setup and early boot code.
+ *
+ * Copyright 2009 Prodrive B.V..
+ *
+ * By Stef van Os (see MAINTAINERS for contact information)
+ *
+ * Based on the SBC8548 support - Copyright 2007 Wind River Systems Inc.
+ * Based on the MPC8548CDS support - Copyright 2005 Freescale Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/seq_file.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+
+#include <sysdev/fsl_soc.h>
+
+static void __init ppa8548_pic_init(void)
+{
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
+ 0, 256, " OpenPIC ");
+ BUG_ON(mpic == NULL);
+ mpic_init(mpic);
+}
+
+/*
+ * Setup the architecture
+ */
+static void __init ppa8548_setup_arch(void)
+{
+ if (ppc_md.progress)
+ ppc_md.progress("ppa8548_setup_arch()", 0);
+}
+
+static void ppa8548_show_cpuinfo(struct seq_file *m)
+{
+ uint32_t svid, phid1;
+
+ svid = mfspr(SPRN_SVR);
+
+ seq_printf(m, "Vendor\t\t: Prodrive B.V.\n");
+ seq_printf(m, "SVR\t\t: 0x%x\n", svid);
+
+ /* Display cpu Pll setting */
+ phid1 = mfspr(SPRN_HID1);
+ seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
+}
+
+static struct of_device_id __initdata of_bus_ids[] = {
+ { .name = "soc", },
+ { .type = "soc", },
+ { .compatible = "simple-bus", },
+ { .compatible = "gianfar", },
+ { .compatible = "fsl,srio", },
+ {},
+};
+
+static int __init declare_of_platform_devices(void)
+{
+ of_platform_bus_probe(NULL, of_bus_ids, NULL);
+
+ return 0;
+}
+machine_device_initcall(ppa8548, declare_of_platform_devices);
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init ppa8548_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "ppa8548");
+}
+
+define_machine(ppa8548) {
+ .name = "ppa8548",
+ .probe = ppa8548_probe,
+ .setup_arch = ppa8548_setup_arch,
+ .init_IRQ = ppa8548_pic_init,
+ .show_cpuinfo = ppa8548_show_cpuinfo,
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
new file mode 100644
index 0000000..7179726
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -0,0 +1,176 @@
+/*
+ * Servergy CTS-1000 Setup
+ *
+ * Maintained by Ben Collins <ben.c@servergy.com>
+ *
+ * Copyright 2012 by Servergy, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/of_gpio.h>
+#include <linux/workqueue.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+
+#include <asm/machdep.h>
+
+static struct device_node *halt_node;
+
+static struct of_device_id child_match[] = {
+ {
+ .compatible = "sgy,gpio-halt",
+ },
+ {},
+};
+
+static void gpio_halt_wfn(struct work_struct *work)
+{
+ /* Likely won't return */
+ orderly_poweroff(true);
+}
+static DECLARE_WORK(gpio_halt_wq, gpio_halt_wfn);
+
+static void gpio_halt_cb(void)
+{
+ enum of_gpio_flags flags;
+ int trigger, gpio;
+
+ if (!halt_node)
+ return;
+
+ gpio = of_get_gpio_flags(halt_node, 0, &flags);
+
+ if (!gpio_is_valid(gpio))
+ return;
+
+ trigger = (flags == OF_GPIO_ACTIVE_LOW);
+
+ printk(KERN_INFO "gpio-halt: triggering GPIO.\n");
+
+ /* Probably won't return */
+ gpio_set_value(gpio, trigger);
+}
+
+/* This IRQ means someone pressed the power button and it is waiting for us
+ * to handle the shutdown/poweroff. */
+static irqreturn_t gpio_halt_irq(int irq, void *__data)
+{
+ printk(KERN_INFO "gpio-halt: shutdown due to power button IRQ.\n");
+ schedule_work(&gpio_halt_wq);
+
+ return IRQ_HANDLED;
+};
+
+static int gpio_halt_probe(struct platform_device *pdev)
+{
+ enum of_gpio_flags flags;
+ struct device_node *node = pdev->dev.of_node;
+ int gpio, err, irq;
+ int trigger;
+
+ if (!node)
+ return -ENODEV;
+
+ /* If there's no matching child, this isn't really an error */
+ halt_node = of_find_matching_node(node, child_match);
+ if (!halt_node)
+ return 0;
+
+ /* Technically we could just read the first one, but punish
+ * DT writers for invalid form. */
+ if (of_gpio_count(halt_node) != 1)
+ return -EINVAL;
+
+ /* Get the gpio number relative to the dynamic base. */
+ gpio = of_get_gpio_flags(halt_node, 0, &flags);
+ if (!gpio_is_valid(gpio))
+ return -EINVAL;
+
+ err = gpio_request(gpio, "gpio-halt");
+ if (err) {
+ printk(KERN_ERR "gpio-halt: error requesting GPIO %d.\n",
+ gpio);
+ halt_node = NULL;
+ return err;
+ }
+
+ trigger = (flags == OF_GPIO_ACTIVE_LOW);
+
+ gpio_direction_output(gpio, !trigger);
+
+ /* Now get the IRQ which tells us when the power button is hit */
+ irq = irq_of_parse_and_map(halt_node, 0);
+ err = request_irq(irq, gpio_halt_irq, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING, "gpio-halt", halt_node);
+ if (err) {
+ printk(KERN_ERR "gpio-halt: error requesting IRQ %d for "
+ "GPIO %d.\n", irq, gpio);
+ gpio_free(gpio);
+ halt_node = NULL;
+ return err;
+ }
+
+ /* Register our halt function */
+ ppc_md.halt = gpio_halt_cb;
+ ppc_md.power_off = gpio_halt_cb;
+
+ printk(KERN_INFO "gpio-halt: registered GPIO %d (%d trigger, %d"
+ " irq).\n", gpio, trigger, irq);
+
+ return 0;
+}
+
+static int gpio_halt_remove(struct platform_device *pdev)
+{
+ if (halt_node) {
+ int gpio = of_get_gpio(halt_node, 0);
+ int irq = irq_of_parse_and_map(halt_node, 0);
+
+ free_irq(irq, halt_node);
+
+ ppc_md.halt = NULL;
+ ppc_md.power_off = NULL;
+
+ gpio_free(gpio);
+
+ halt_node = NULL;
+ }
+
+ return 0;
+}
+
+static struct of_device_id gpio_halt_match[] = {
+ /* We match on the gpio bus itself and scan the children since they
+ * won't be matched against us. We know the bus won't match until it
+ * has been registered too. */
+ {
+ .compatible = "fsl,qoriq-gpio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gpio_halt_match);
+
+static struct platform_driver gpio_halt_driver = {
+ .driver = {
+ .name = "gpio-halt",
+ .owner = THIS_MODULE,
+ .of_match_table = gpio_halt_match,
+ },
+ .probe = gpio_halt_probe,
+ .remove = gpio_halt_remove,
+};
+
+module_platform_driver(gpio_halt_driver);
+
+MODULE_DESCRIPTION("Driver to support GPIO triggered system halt for Servergy CTS-1000 Systems.");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Ben Collins <ben.c@servergy.com>");
+MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/85xx/t4240_qds.c b/arch/powerpc/platforms/85xx/t4240_qds.c
index c649b48..c68baaa 100644
--- a/arch/powerpc/platforms/85xx/t4240_qds.c
+++ b/arch/powerpc/platforms/85xx/t4240_qds.c
@@ -5,8 +5,8 @@
*
* Copyright 2012 Freescale Semiconductor Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 7a6279e..1afd1e4 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -37,7 +37,6 @@ config GEF_PPC9A
bool "GE PPC9A"
select DEFAULT_UIMAGE
select MMIO_NVRAM
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select GE_FPGA
help
@@ -47,7 +46,6 @@ config GEF_SBC310
bool "GE SBC310"
select DEFAULT_UIMAGE
select MMIO_NVRAM
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select GE_FPGA
help
@@ -57,7 +55,6 @@ config GEF_SBC610
bool "GE SBC610"
select DEFAULT_UIMAGE
select MMIO_NVRAM
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select GE_FPGA
select HAS_RAPIDIO
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 1fb0b3c..8dec3c0 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -114,7 +114,6 @@ config 8xx_COPYBACK
config 8xx_GPIO
bool "GPIO API Support"
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Saying Y here will cause the ports on an MPC8xx processor to be used
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 0cf4d6d..a1c5686 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -6,7 +6,6 @@ source "arch/powerpc/platforms/chrp/Kconfig"
source "arch/powerpc/platforms/512x/Kconfig"
source "arch/powerpc/platforms/52xx/Kconfig"
source "arch/powerpc/platforms/powermac/Kconfig"
-source "arch/powerpc/platforms/prep/Kconfig"
source "arch/powerpc/platforms/maple/Kconfig"
source "arch/powerpc/platforms/pasemi/Kconfig"
source "arch/powerpc/platforms/ps3/Kconfig"
@@ -150,7 +149,7 @@ config PPC_RTAS_DAEMON
config RTAS_PROC
bool "Proc interface to RTAS"
- depends on PPC_RTAS
+ depends on PPC_RTAS && PROC_FS
default y
config RTAS_FLASH
@@ -265,7 +264,7 @@ endmenu
config PPC601_SYNC_FIX
bool "Workarounds for PPC601 bugs"
- depends on 6xx && (PPC_PREP || PPC_PMAC)
+ depends on 6xx && PPC_PMAC
help
Some versions of the PPC601 (the first PowerPC chip) have bugs which
mean that extra synchronization instructions are required near
@@ -335,7 +334,6 @@ config QUICC_ENGINE
config QE_GPIO
bool "QE GPIO support"
depends on QUICC_ENGINE
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Say Y here if you're going to use hardware that connects to the
@@ -348,7 +346,6 @@ config CPM2
select PPC_LIB_RHEAP
select PPC_PCI_CHOICE
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
help
The CPM2 (Communications Processor Module) is a coprocessor on
embedded CPUs made by Freescale. Selecting this option means that
@@ -383,12 +380,9 @@ config OF_RTC
Uses information from the OF or flattened device tree to instantiate
platform devices for direct mapped RTC chips like the DS1742 or DS1743.
-source "arch/powerpc/sysdev/bestcomm/Kconfig"
-
config SIMPLE_GPIO
bool "Support for simple, memory-mapped GPIO controllers"
depends on PPC
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Say Y here to support simple, memory-mapped GPIO controllers.
@@ -399,7 +393,6 @@ config SIMPLE_GPIO
config MCU_MPC8349EMITX
bool "MPC8349E-mITX MCU driver"
depends on I2C=y && PPC_83xx
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Say Y here to enable soft power-off functionality on the Freescale
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 4943a65..57e32a3 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -76,6 +76,7 @@ config PPC_BOOK3E_64
bool "Embedded processors"
select PPC_FPU # Make it a choice ?
select PPC_SMP_MUXED_IPI
+ select PPC_DOORBELL
endchoice
@@ -123,9 +124,8 @@ config 6xx
select PPC_HAVE_PMU_SUPPORT
config POWER3
- bool
depends on PPC64 && PPC_BOOK3S
- default y if !POWER4_ONLY
+ def_bool y
config POWER4
depends on PPC64 && PPC_BOOK3S
@@ -144,8 +144,7 @@ config TUNE_CELL
but somewhat slower on other machines. This option only changes
the scheduling of instructions, not the selection of instructions
itself, so the resulting kernel will keep running on all other
- machines. When building a kernel that is supposed to run only
- on Cell, you should also select the POWER4_ONLY option.
+ machines.
# this is temp to handle compat with arch=ppc
config 8xx
@@ -285,6 +284,7 @@ config PPC_FSL_BOOK3E
select FSL_EMB_PERFMON
select PPC_SMP_MUXED_IPI
select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
+ select PPC_DOORBELL
default y if FSL_BOOKE
config PTE_64BIT
@@ -459,4 +459,8 @@ config NOT_COHERENT_CACHE
config CHECK_CACHE_COHERENCY
bool
+config PPC_DOORBELL
+ bool
+ default n
+
endmenu
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 2e7ff0c..9978f59 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -113,34 +113,10 @@ config CBE_THERM
default m
depends on CBE_RAS && SPU_BASE
-config CBE_CPUFREQ
- tristate "CBE frequency scaling"
- depends on CBE_RAS && CPU_FREQ
- default m
- help
- This adds the cpufreq driver for Cell BE processors.
- For details, take a look at <file:Documentation/cpu-freq/>.
- If you don't have such processor, say N
-
-config CBE_CPUFREQ_PMI_ENABLE
- bool "CBE frequency scaling using PMI interface"
- depends on CBE_CPUFREQ && EXPERIMENTAL
- default n
- help
- Select this, if you want to use the PMI interface
- to switch frequencies. Using PMI, the
- processor will not only be able to run at lower speed,
- but also at lower core voltage.
-
-config CBE_CPUFREQ_PMI
- tristate
- depends on CBE_CPUFREQ_PMI_ENABLE
- default CBE_CPUFREQ
-
config PPC_PMI
tristate
default y
- depends on CBE_CPUFREQ_PMI || PPC_IBM_CELL_POWERBUTTON
+ depends on CPU_FREQ_CBE_PMI || PPC_IBM_CELL_POWERBUTTON
help
PMI (Platform Management Interrupt) is a way to
communicate with the BMC (Baseboard Management Controller).
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index a4a8935..fe053e7 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -5,9 +5,6 @@ obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \
obj-$(CONFIG_CBE_RAS) += ras.o
obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
-obj-$(CONFIG_CBE_CPUFREQ_PMI) += cbe_cpufreq_pmi.o
-obj-$(CONFIG_CBE_CPUFREQ) += cbe-cpufreq.o
-cbe-cpufreq-y += cbe_cpufreq_pervasive.o cbe_cpufreq.o
obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o
obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 0f6f839..246e1d8 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -90,7 +90,7 @@ static inline unsigned int beat_read_mask(unsigned hpte_group)
static long beat_lpar_hpte_insert(unsigned long hpte_group,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
- int psize, int ssize)
+ int psize, int apsize, int ssize)
{
unsigned long lpar_rc;
u64 hpte_v, hpte_r, slot;
@@ -103,9 +103,9 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
"rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, va, pa, rflags, vflags, psize);
- hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
+ hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize) | rflags;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
@@ -191,7 +191,7 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
u64 dummy0, dummy1;
unsigned long want_v;
- want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
+ want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
DBG_LOW(" update: "
"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -228,7 +228,7 @@ static long beat_lpar_hpte_find(unsigned long vpn, int psize)
unsigned long want_v, hpte_v;
hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
- want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
+ want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
for (j = 0; j < 2; j++) {
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -283,7 +283,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
slot, va, psize, local);
- want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
+ want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
raw_spin_lock_irqsave(&beat_htab_lock, flags);
dummy1 = beat_lpar_hpte_getword0(slot);
@@ -314,7 +314,7 @@ void __init hpte_init_beat(void)
static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
- int psize, int ssize)
+ int psize, int apsize, int ssize)
{
unsigned long lpar_rc;
u64 hpte_v, hpte_r, slot;
@@ -327,9 +327,9 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
"rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, vpn, pa, rflags, vflags, psize);
- hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
+ hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize) | rflags;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
@@ -372,8 +372,8 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
unsigned long want_v;
unsigned long pss;
- want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
- pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
+ want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
+ pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
DBG_LOW(" update: "
"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -402,8 +402,8 @@ static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
slot, vpn, psize, local);
- want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
- pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
+ want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
+ pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
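
The beat_htab conversion above tracks the 3.10 hash-PTE helper API: hpte_encode_v() and hpte_encode_r() now take both the base page size (psize) and the actual page size (apsize), while the search, update and invalidate paths build their match value with hpte_encode_avpn(). A minimal sketch of the new helper combination, assuming only what the hunks above already show (the wrapper names are illustrative):

#include <asm/mmu-hash64.h>

/* Illustrative only: combine the new-style encode helpers the way the
 * insert paths above do.  psize is the base page size of the segment,
 * apsize the actual page size backing this particular mapping. */
static inline unsigned long example_encode_valid_hpte_v(unsigned long vpn,
							int psize, int apsize,
							int ssize,
							unsigned long vflags)
{
	return hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
}

/* Match value for search/update/invalidate: no actual page size needed. */
static inline unsigned long example_encode_match(unsigned long vpn, int psize,
						 int ssize)
{
	return hpte_encode_avpn(vpn, psize, ssize);
}

The split lets a segment keep its base page size while individual HPTEs map a different actual page size.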
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
deleted file mode 100644
index d4c39e3..0000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * cpufreq driver for the cell processor
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/cell-regs.h>
-#include "cbe_cpufreq.h"
-
-static DEFINE_MUTEX(cbe_switch_mutex);
-
-
-/* the CBE supports an 8 step frequency scaling */
-static struct cpufreq_frequency_table cbe_freqs[] = {
- {1, 0},
- {2, 0},
- {3, 0},
- {4, 0},
- {5, 0},
- {6, 0},
- {8, 0},
- {10, 0},
- {0, CPUFREQ_TABLE_END},
-};
-
-/*
- * hardware specific functions
- */
-
-static int set_pmode(unsigned int cpu, unsigned int slow_mode)
-{
- int rc;
-
- if (cbe_cpufreq_has_pmi)
- rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
- else
- rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
-
- pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
-
- return rc;
-}
-
-/*
- * cpufreq functions
- */
-
-static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- const u32 *max_freqp;
- u32 max_freq;
- int i, cur_pmode;
- struct device_node *cpu;
-
- cpu = of_get_cpu_node(policy->cpu, NULL);
-
- if (!cpu)
- return -ENODEV;
-
- pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
- /*
- * Let's check we can actually get to the CELL regs
- */
- if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
- !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
- pr_info("invalid CBE regs pointers for cpufreq\n");
- return -EINVAL;
- }
-
- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-
- of_node_put(cpu);
-
- if (!max_freqp)
- return -EINVAL;
-
- /* we need the freq in kHz */
- max_freq = *max_freqp / 1000;
-
- pr_debug("max clock-frequency is at %u kHz\n", max_freq);
- pr_debug("initializing frequency table\n");
-
- /* initialize frequency table */
- for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
- cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index;
- pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
- }
-
- /* if DEBUG is enabled set_pmode() measures the latency
- * of a transition */
- policy->cpuinfo.transition_latency = 25000;
-
- cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
- pr_debug("current pmode is at %d\n",cur_pmode);
-
- policy->cur = cbe_freqs[cur_pmode].frequency;
-
-#ifdef CONFIG_SMP
- cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
-#endif
-
- cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
-
- /* this ensures that policy->cpuinfo_min
- * and policy->cpuinfo_max are set correctly */
- return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
-}
-
-static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, cbe_freqs);
-}
-
-static int cbe_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- int rc;
- struct cpufreq_freqs freqs;
- unsigned int cbe_pmode_new;
-
- cpufreq_frequency_table_target(policy,
- cbe_freqs,
- target_freq,
- relation,
- &cbe_pmode_new);
-
- freqs.old = policy->cur;
- freqs.new = cbe_freqs[cbe_pmode_new].frequency;
- freqs.cpu = policy->cpu;
-
- mutex_lock(&cbe_switch_mutex);
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-
- pr_debug("setting frequency for cpu %d to %d kHz, " \
- "1/%d of max frequency\n",
- policy->cpu,
- cbe_freqs[cbe_pmode_new].frequency,
- cbe_freqs[cbe_pmode_new].index);
-
- rc = set_pmode(policy->cpu, cbe_pmode_new);
-
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- mutex_unlock(&cbe_switch_mutex);
-
- return rc;
-}
-
-static struct cpufreq_driver cbe_cpufreq_driver = {
- .verify = cbe_cpufreq_verify,
- .target = cbe_cpufreq_target,
- .init = cbe_cpufreq_cpu_init,
- .exit = cbe_cpufreq_cpu_exit,
- .name = "cbe-cpufreq",
- .owner = THIS_MODULE,
- .flags = CPUFREQ_CONST_LOOPS,
-};
-
-/*
- * module init and destoy
- */
-
-static int __init cbe_cpufreq_init(void)
-{
- if (!machine_is(cell))
- return -ENODEV;
-
- return cpufreq_register_driver(&cbe_cpufreq_driver);
-}
-
-static void __exit cbe_cpufreq_exit(void)
-{
- cpufreq_unregister_driver(&cbe_cpufreq_driver);
-}
-
-module_init(cbe_cpufreq_init);
-module_exit(cbe_cpufreq_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.h b/arch/powerpc/platforms/cell/cbe_cpufreq.h
deleted file mode 100644
index c1d86bf..0000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * cbe_cpufreq.h
- *
- * This file contains the definitions used by the cbe_cpufreq driver.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- */
-
-#include <linux/cpufreq.h>
-#include <linux/types.h>
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
-int cbe_cpufreq_get_pmode(int cpu);
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
-
-#if defined(CONFIG_CBE_CPUFREQ_PMI) || defined(CONFIG_CBE_CPUFREQ_PMI_MODULE)
-extern bool cbe_cpufreq_has_pmi;
-#else
-#define cbe_cpufreq_has_pmi (0)
-#endif
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
deleted file mode 100644
index 20472e4..0000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * pervasive backend for the cbe_cpufreq driver
- *
- * This driver makes use of the pervasive unit to
- * engage the desired frequency.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <asm/machdep.h>
-#include <asm/hw_irq.h>
-#include <asm/cell-regs.h>
-
-#include "cbe_cpufreq.h"
-
-/* to write to MIC register */
-static u64 MIC_Slow_Fast_Timer_table[] = {
- [0 ... 7] = 0x007fc00000000000ull,
-};
-
-/* more values for the MIC */
-static u64 MIC_Slow_Next_Timer_table[] = {
- 0x0000240000000000ull,
- 0x0000268000000000ull,
- 0x000029C000000000ull,
- 0x00002D0000000000ull,
- 0x0000300000000000ull,
- 0x0000334000000000ull,
- 0x000039C000000000ull,
- 0x00003FC000000000ull,
-};
-
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
-{
- struct cbe_pmd_regs __iomem *pmd_regs;
- struct cbe_mic_tm_regs __iomem *mic_tm_regs;
- unsigned long flags;
- u64 value;
-#ifdef DEBUG
- long time;
-#endif
-
- local_irq_save(flags);
-
- mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
-
-#ifdef DEBUG
- time = jiffies;
-#endif
-
- out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
-
- out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
-
- value = in_be64(&pmd_regs->pmcr);
- /* set bits to zero */
- value &= 0xFFFFFFFFFFFFFFF8ull;
- /* set bits to next pmode */
- value |= pmode;
-
- out_be64(&pmd_regs->pmcr, value);
-
-#ifdef DEBUG
- /* wait until new pmode appears in status register */
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- while (value != pmode) {
- cpu_relax();
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- }
-
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "pervasive unit\n", time);
-#endif
- local_irq_restore(flags);
-
- return 0;
-}
-
-
-int cbe_cpufreq_get_pmode(int cpu)
-{
- int ret;
- struct cbe_pmd_regs __iomem *pmd_regs;
-
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
- ret = in_be64(&pmd_regs->pmsr) & 0x07;
-
- return ret;
-}
-
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
deleted file mode 100644
index 60a07a4..0000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * pmi backend for the cbe_cpufreq driver
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-
-#include <asm/processor.h>
-#include <asm/prom.h>
-#include <asm/pmi.h>
-#include <asm/cell-regs.h>
-
-#ifdef DEBUG
-#include <asm/time.h>
-#endif
-
-#include "cbe_cpufreq.h"
-
-static u8 pmi_slow_mode_limit[MAX_CBE];
-
-bool cbe_cpufreq_has_pmi = false;
-EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
-
-/*
- * hardware specific functions
- */
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
-{
- int ret;
- pmi_message_t pmi_msg;
-#ifdef DEBUG
- long time;
-#endif
- pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
- pmi_msg.data1 = cbe_cpu_to_node(cpu);
- pmi_msg.data2 = pmode;
-
-#ifdef DEBUG
- time = jiffies;
-#endif
- pmi_send_message(pmi_msg);
-
-#ifdef DEBUG
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "PMI\n", time);
-#endif
- ret = pmi_msg.data2;
- pr_debug("PMI returned slow mode %d\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
-
-
-static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
-{
- u8 node, slow_mode;
-
- BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
-
- node = pmi_msg.data1;
- slow_mode = pmi_msg.data2;
-
- pmi_slow_mode_limit[node] = slow_mode;
-
- pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
-}
-
-static int pmi_notifier(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct cpufreq_policy *policy = data;
- struct cpufreq_frequency_table *cbe_freqs;
- u8 node;
-
- /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
- * and CPUFREQ_NOTIFY policy events?)
- */
- if (event == CPUFREQ_START)
- return 0;
-
- cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
- node = cbe_cpu_to_node(policy->cpu);
-
- pr_debug("got notified, event=%lu, node=%u\n", event, node);
-
- if (pmi_slow_mode_limit[node] != 0) {
- pr_debug("limiting node %d to slow mode %d\n",
- node, pmi_slow_mode_limit[node]);
-
- cpufreq_verify_within_limits(policy, 0,
-
- cbe_freqs[pmi_slow_mode_limit[node]].frequency);
- }
-
- return 0;
-}
-
-static struct notifier_block pmi_notifier_block = {
- .notifier_call = pmi_notifier,
-};
-
-static struct pmi_handler cbe_pmi_handler = {
- .type = PMI_TYPE_FREQ_CHANGE,
- .handle_pmi_message = cbe_cpufreq_handle_pmi,
-};
-
-
-
-static int __init cbe_cpufreq_pmi_init(void)
-{
- cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
-
- if (!cbe_cpufreq_has_pmi)
- return -ENODEV;
-
- cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
-
- return 0;
-}
-
-static void __exit cbe_cpufreq_pmi_exit(void)
-{
- cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
- pmi_unregister_handler(&cbe_pmi_handler);
-}
-
-module_init(cbe_cpufreq_pmi_init);
-module_exit(cbe_cpufreq_pmi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c
index 3a16c5b..9c339ec 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_sio.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_sio.c
@@ -42,14 +42,13 @@ static struct {
static int __init txx9_serial_init(void)
{
extern int early_serial_txx9_setup(struct uart_port *port);
- struct device_node *node = NULL;
+ struct device_node *node;
int i;
struct uart_port req;
struct of_irq irq;
struct resource res;
- while ((node = of_find_compatible_node(node,
- "serial", "toshiba,sio-scc")) != NULL) {
+ for_each_compatible_node(node, "serial", "toshiba,sio-scc") {
for (i = 0; i < ARRAY_SIZE(txx9_scc_tab); i++) {
if (!(txx9_serial_bitmap & (1<<i)))
continue;
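
The txx9_serial_init() hunk above replaces a manual of_find_compatible_node() loop with the for_each_compatible_node() iterator, which starts from NULL and drops the previous node reference on each step. A stand-alone sketch of the same idiom (the function name and counter are placeholders):

#include <linux/init.h>
#include <linux/of.h>

/* Illustrative: count every node with device_type "serial" and a
 * compatible entry of "toshiba,sio-scc", the set the rewritten loop
 * above walks.  The iterator releases the previous node reference on
 * each step, so no explicit of_node_put() is needed here. */
static int __init example_count_scc_nodes(void)
{
	struct device_node *node;
	int found = 0;

	for_each_compatible_node(node, "serial", "toshiba,sio-scc")
		found++;

	return found;
}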
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index e56bb65..946306b 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -550,7 +550,7 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
*/
iommu = cell_iommu_for_node(dev_to_node(dev));
if (iommu == NULL || list_empty(&iommu->windows)) {
- printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
+ dev_err(dev, "iommu: missing iommu for %s (node %d)\n",
of_node_full_name(dev->of_node), dev_to_node(dev));
return NULL;
}
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 59c1a16..348a27b 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -382,7 +382,7 @@ static int __init cbe_init_pm_irq(void)
unsigned int irq;
int rc, node;
- for_each_node(node) {
+ for_each_online_node(node) {
irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
(node << IIC_IRQ_NODE_SHIFT));
if (irq == NO_IRQ) {
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 8b12139..f85db3a 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -715,7 +715,7 @@ static ssize_t spu_stat_show(struct device *dev,
spu->stats.libassist);
}
-static DEVICE_ATTR(stat, 0644, spu_stat_show, NULL);
+static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);
#ifdef CONFIG_KEXEC
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index 75d6133..b0ec78e 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -60,13 +60,12 @@ long spu_sys_callback(struct spu_syscall_block *s)
syscall = spu_syscall_table[s->nr_ret];
-#ifdef DEBUG
- print_symbol(KERN_DEBUG "SPU-syscall %s:", (unsigned long)syscall);
- printk("syscall%ld(%lx, %lx, %lx, %lx, %lx, %lx)\n",
- s->nr_ret,
- s->parm[0], s->parm[1], s->parm[2],
- s->parm[3], s->parm[4], s->parm[5]);
-#endif
+ pr_debug("SPU-syscall "
+ "%pSR:syscall%lld(%llx, %llx, %llx, %llx, %llx, %llx)\n",
+ syscall,
+ s->nr_ret,
+ s->parm[0], s->parm[1], s->parm[2],
+ s->parm[3], s->parm[4], s->parm[5]);
return syscall(s->parm[0], s->parm[1], s->parm[2],
s->parm[3], s->parm[4], s->parm[5]);
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 657e3f2..c9500ea 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -111,7 +111,7 @@ static int match_context(const void *v, struct file *file, unsigned fd)
struct spu_context *ctx;
if (file->f_op != &spufs_context_fops)
return 0;
- ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
+ ctx = SPUFS_I(file_inode(file))->i_ctx;
if (ctx->flags & SPU_CREATE_NOSCHED)
return 0;
return fd + 1;
@@ -137,7 +137,7 @@ static struct spu_context *coredump_next_context(int *fd)
return NULL;
*fd = n - 1;
file = fcheck(*fd);
- return SPUFS_I(file->f_dentry->d_inode)->i_ctx;
+ return SPUFS_I(file_inode(file))->i_ctx;
}
int spufs_coredump_extra_notes_size(void)
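
This hunk, and several spufs hunks below, switch from file->f_dentry->d_inode (or file->f_path.dentry->d_inode) to the file_inode() accessor. A trivial sketch of the idiom, with a made-up helper name:

#include <linux/fs.h>
#include <linux/types.h>

/* Illustrative: file_inode() is the supported accessor for a struct
 * file's inode and replaces the dentry-based chains removed above. */
static bool example_same_inode(struct file *a, struct file *b)
{
	return file_inode(a) == file_inode(b);
}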
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 0cfece4..9098692 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -149,7 +149,6 @@ static int __fops ## _open(struct inode *inode, struct file *file) \
return spufs_attr_open(inode, file, __get, __set, __fmt); \
} \
static const struct file_operations __fops = { \
- .owner = THIS_MODULE, \
.open = __fops ## _open, \
.release = spufs_attr_release, \
.read = spufs_attr_read, \
@@ -352,7 +351,7 @@ static unsigned long spufs_get_unmapped_area(struct file *file,
/* Else, try to obtain a 64K pages slice */
return slice_get_unmapped_area(addr, len, flags,
- MMU_PAGE_64K, 1, 0);
+ MMU_PAGE_64K, 1);
}
#endif /* CONFIG_SPU_FS_64K_LS */
@@ -1852,7 +1851,7 @@ out:
static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (!err) {
mutex_lock(&inode->i_mutex);
@@ -2501,7 +2500,7 @@ static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
int error = 0, cnt = 0;
@@ -2571,7 +2570,7 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
unsigned int mask = 0;
int rc;
@@ -2591,7 +2590,6 @@ static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
}
static const struct file_operations spufs_switch_log_fops = {
- .owner = THIS_MODULE,
.open = spufs_switch_log_open,
.read = spufs_switch_log_read,
.poll = spufs_switch_log_poll,
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index dba1ce2..35f77a4 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -99,6 +99,7 @@ spufs_new_inode(struct super_block *sb, umode_t mode)
if (!inode)
goto out;
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
@@ -199,37 +200,18 @@ static int spufs_fill_dir(struct dentry *dir,
const struct spufs_tree_descr *files, umode_t mode,
struct spu_context *ctx)
{
- struct dentry *dentry, *tmp;
- int ret;
-
while (files->name && files->name[0]) {
- ret = -ENOMEM;
- dentry = d_alloc_name(dir, files->name);
+ int ret;
+ struct dentry *dentry = d_alloc_name(dir, files->name);
if (!dentry)
- goto out;
+ return -ENOMEM;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
if (ret)
- goto out;
+ return ret;
files++;
}
return 0;
-out:
- /*
- * remove all children from dir. dir->inode is not set so don't
- * just simply use spufs_prune_dir() and panic afterwards :)
- * dput() looks like it will do the right thing:
- * - dec parent's ref counter
- * - remove child from parent's child list
- * - free child's inode if possible
- * - free child
- */
- list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
- dput(dentry);
- }
-
- shrink_dcache_parent(dir);
- return ret;
}
static int spufs_dir_close(struct inode *inode, struct file *file)
@@ -269,10 +251,9 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
struct inode *inode;
struct spu_context *ctx;
- ret = -ENOSPC;
inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
if (!inode)
- goto out;
+ return -ENOSPC;
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
@@ -280,40 +261,38 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
}
ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
SPUFS_I(inode)->i_ctx = ctx;
- if (!ctx)
- goto out_iput;
+ if (!ctx) {
+ iput(inode);
+ return -ENOSPC;
+ }
ctx->flags = flags;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
+
+ mutex_lock(&inode->i_mutex);
+
+ dget(dentry);
+ inc_nlink(dir);
+ inc_nlink(inode);
+
+ d_instantiate(dentry, inode);
+
if (flags & SPU_CREATE_NOSCHED)
ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
mode, ctx);
else
ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
- if (ret)
- goto out_free_ctx;
-
- if (spufs_get_sb_info(dir->i_sb)->debug)
+ if (!ret && spufs_get_sb_info(dir->i_sb)->debug)
ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
mode, ctx);
if (ret)
- goto out_free_ctx;
+ spufs_rmdir(dir, dentry);
- d_instantiate(dentry, inode);
- dget(dentry);
- inc_nlink(dir);
- inc_nlink(dentry->d_inode);
- goto out;
+ mutex_unlock(&inode->i_mutex);
-out_free_ctx:
- spu_forget(ctx);
- put_spu_context(ctx);
-out_iput:
- iput(inode);
-out:
return ret;
}
@@ -368,7 +347,7 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
return ERR_PTR(-EINVAL);
neighbor = get_spu_context(
- SPUFS_I(filp->f_dentry->d_inode)->i_ctx);
+ SPUFS_I(file_inode(filp))->i_ctx);
if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
!list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
@@ -771,6 +750,7 @@ static struct file_system_type spufs_type = {
.mount = spufs_mount,
.kill_sb = kill_litter_super,
};
+MODULE_ALIAS_FS("spufs");
static int __init spufs_init(void)
{
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 25db92a..4931838 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -24,6 +24,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/sched/rt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index baee994..b045fdd 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -47,7 +47,7 @@ static long do_spu_run(struct file *filp,
if (filp->f_op != &spufs_context_fops)
goto out;
- i = SPUFS_I(filp->f_path.dentry->d_inode);
+ i = SPUFS_I(file_inode(filp));
ret = spufs_run_spu(i->i_ctx, &npc, &status);
if (put_user(npc, unpc))
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
index 039fc8e..2b4dc6a 100644
--- a/arch/powerpc/platforms/chrp/pegasos_eth.c
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -47,6 +47,25 @@ static struct platform_device mv643xx_eth_shared_device = {
.resource = mv643xx_eth_shared_resources,
};
+/*
+ * The orion mdio driver only covers shared + 0x4 up to shared + 0x84 - 1
+ */
+static struct resource mv643xx_eth_mvmdio_resources[] = {
+ [0] = {
+ .name = "ethernet mdio base",
+ .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x4,
+ .end = 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x83,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device mv643xx_eth_mvmdio_device = {
+ .name = "orion-mdio",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(mv643xx_eth_mvmdio_resources),
+ .resource = mv643xx_eth_shared_resources,
+};
+
static struct resource mv643xx_eth_port1_resources[] = {
[0] = {
.name = "eth port1 irq",
@@ -82,6 +101,7 @@ static struct platform_device eth_port1_device = {
static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
&mv643xx_eth_shared_device,
+ &mv643xx_eth_mvmdio_device,
&eth_port1_device,
};
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 5a8f50a9..302ba43 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -9,7 +9,6 @@ config LINKSTATION
select FSL_SOC
select PPC_UDBG_16550 if SERIAL_8250
select DEFAULT_UIMAGE
- select MPC10X_OPENPIC
select MPC10X_BRIDGE
help
Select LINKSTATION if configuring for one of PPC- (MPC8241)
@@ -24,7 +23,6 @@ config STORCENTER
select MPIC
select FSL_SOC
select PPC_UDBG_16550 if SERIAL_8250
- select MPC10X_OPENPIC
select MPC10X_BRIDGE
help
Select STORCENTER if configuring for the iomega StorCenter
@@ -84,9 +82,6 @@ config MV64X60
select PPC_INDIRECT_PCI
select CHECK_CACHE_COHERENCY
-config MPC10X_OPENPIC
- bool
-
config GAMECUBE_COMMON
bool
diff --git a/arch/powerpc/platforms/embedded6xx/mpc10x.h b/arch/powerpc/platforms/embedded6xx/mpc10x.h
index b30a6a3..b290b63 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc10x.h
+++ b/arch/powerpc/platforms/embedded6xx/mpc10x.h
@@ -81,17 +81,6 @@
#define MPC10X_MAPB_PCI_MEM_OFFSET (MPC10X_MAPB_ISA_MEM_BASE - \
MPC10X_MAPB_PCI_MEM_START)
-/* Set hose members to values appropriate for the mem map used */
-#define MPC10X_SETUP_HOSE(hose, map) { \
- (hose)->pci_mem_offset = MPC10X_MAP##map##_PCI_MEM_OFFSET; \
- (hose)->io_space.start = MPC10X_MAP##map##_PCI_IO_START; \
- (hose)->io_space.end = MPC10X_MAP##map##_PCI_IO_END; \
- (hose)->mem_space.start = MPC10X_MAP##map##_PCI_MEM_START; \
- (hose)->mem_space.end = MPC10X_MAP##map##_PCI_MEM_END; \
- (hose)->io_base_virt = (void *)MPC10X_MAP##map##_ISA_IO_BASE; \
-}
-
-
/* Miscellaneous Configuration register offsets */
#define MPC10X_CFG_PIR_REG 0x09
#define MPC10X_CFG_PIR_HOST_BRIDGE 0x00
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index 890f30e..be1e795 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -273,10 +273,9 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
freqs.old = policy->cur;
freqs.new = pas_freqs[pas_astate_new].frequency;
- freqs.cpu = policy->cpu;
mutex_lock(&pas_switch_mutex);
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
policy->cpu,
@@ -288,7 +287,7 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
for_each_online_cpu(i)
set_astate(i, pas_astate_new);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
mutex_unlock(&pas_switch_mutex);
ppc_proc_freq = freqs.new * 1000ul;
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 311b804..3104fad 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -335,7 +335,8 @@ static int pmu_set_cpu_speed(int low_speed)
return 0;
}
-static int do_set_cpu_speed(int speed_mode, int notify)
+static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
+ int notify)
{
struct cpufreq_freqs freqs;
unsigned long l3cr;
@@ -343,13 +344,12 @@ static int do_set_cpu_speed(int speed_mode, int notify)
freqs.old = cur_freq;
freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
- freqs.cpu = smp_processor_id();
if (freqs.old == freqs.new)
return 0;
if (notify)
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (speed_mode == CPUFREQ_LOW &&
cpu_has_feature(CPU_FTR_L3CR)) {
l3cr = _get_L3CR();
@@ -366,7 +366,7 @@ static int do_set_cpu_speed(int speed_mode, int notify)
_set_L3CR(prev_l3cr);
}
if (notify)
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
return 0;
@@ -393,7 +393,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy,
target_freq, relation, &newstate))
return -EINVAL;
- rc = do_set_cpu_speed(newstate, 1);
+ rc = do_set_cpu_speed(policy, newstate, 1);
ppc_proc_freq = cur_freq * 1000ul;
return rc;
@@ -442,7 +442,7 @@ static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
no_schedule = 1;
sleep_freq = cur_freq;
if (cur_freq == low_freq && !is_pmu_based)
- do_set_cpu_speed(CPUFREQ_HIGH, 0);
+ do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
return 0;
}
@@ -458,7 +458,7 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
* is that we force a switch to whatever it was, which is
* probably high speed due to our suspend() routine
*/
- do_set_cpu_speed(sleep_freq == low_freq ?
+ do_set_cpu_speed(policy, sleep_freq == low_freq ?
CPUFREQ_LOW : CPUFREQ_HIGH, 0);
ppc_proc_freq = cur_freq * 1000ul;
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 9650c60..7ba4234 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -339,11 +339,10 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy,
freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
freqs.new = g5_cpu_freqs[newstate].frequency;
- freqs.cpu = 0;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
rc = g5_switch_freq(newstate);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
mutex_unlock(&g5_switch_mutex);
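
The pasemi and powermac cpufreq hunks all follow the same core API change: cpufreq_notify_transition() now takes the policy as its first argument, and drivers no longer fill in freqs.cpu themselves. A hedged skeleton of a target() callback under the new convention; example_switch_hw() is a hypothetical stand-in for the real clock reprogramming:

#include <linux/cpufreq.h>

/* Hypothetical stand-in for whatever actually reprograms the clock. */
static int example_switch_hw(int cpu, unsigned int khz)
{
	return 0;
}

/* Illustrative target() skeleton using the post-3.10 notifier calls;
 * note that freqs.cpu is no longer set by the driver. */
static int example_cpufreq_target(struct cpufreq_policy *policy,
				  unsigned int new_khz)
{
	struct cpufreq_freqs freqs;
	int rc;

	freqs.old = policy->cur;
	freqs.new = new_khz;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	rc = example_switch_hw(policy->cpu, new_khz);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return rc;
}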
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 2b8af75..cf7009b 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -824,6 +824,7 @@ static void __init parse_region_decode(struct pci_controller *hose,
hose->mem_resources[cur].name = hose->dn->full_name;
hose->mem_resources[cur].start = base;
hose->mem_resources[cur].end = end;
+ hose->mem_offset[cur] = 0;
DBG(" %d: 0x%08lx-0x%08lx\n", cur, base, end);
} else {
DBG(" : -0x%08lx\n", end);
@@ -866,7 +867,6 @@ static void __init setup_u3_ht(struct pci_controller* hose)
hose->io_resource.start = 0;
hose->io_resource.end = 0x003fffff;
hose->io_resource.flags = IORESOURCE_IO;
- hose->pci_mem_offset = 0;
hose->first_busno = 0;
hose->last_busno = 0xef;
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 74fea5c..c24684c 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -6,6 +6,12 @@ config PPC_POWERNV
select PPC_ICP_NATIVE
select PPC_P7_NAP
select PPC_PCI_CHOICE if EMBEDDED
+ select EPAPR_BOOT
+ default y
+
+config POWERNV_MSI
+ bool "Support PCI MSI on PowerNV platform"
+ depends on PCI_MSI
default y
config PPC_POWERNV_RTAS
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 3bb07e5..6fabe92 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -107,3 +107,4 @@ OPAL_CALL(opal_pci_mask_pe_error, OPAL_PCI_MASK_PE_ERROR);
OPAL_CALL(opal_set_slot_led_status, OPAL_SET_SLOT_LED_STATUS);
OPAL_CALL(opal_get_epow_status, OPAL_GET_EPOW_STATUS);
OPAL_CALL(opal_set_system_attention_led, OPAL_SET_SYSTEM_ATTENTION_LED);
+OPAL_CALL(opal_pci_msi_eoi, OPAL_PCI_MSI_EOI);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index aaa0dba..628c564 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
+#include <linux/slab.h>
#include <asm/opal.h>
#include <asm/firmware.h>
@@ -28,13 +29,14 @@ struct opal {
static struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
extern u64 opal_mc_secondary_handler[];
+static unsigned int *opal_irqs;
+static unsigned int opal_irq_count;
int __init early_init_dt_scan_opal(unsigned long node,
const char *uname, int depth, void *data)
{
const void *basep, *entryp;
unsigned long basesz, entrysz;
- u64 glue;
if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
return 0;
@@ -54,13 +56,27 @@ int __init early_init_dt_scan_opal(unsigned long node,
opal.entry, entryp, entrysz);
powerpc_firmware_features |= FW_FEATURE_OPAL;
- if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
+ if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
+ powerpc_firmware_features |= FW_FEATURE_OPALv2;
+ powerpc_firmware_features |= FW_FEATURE_OPALv3;
+ printk("OPAL V3 detected !\n");
+ } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
powerpc_firmware_features |= FW_FEATURE_OPALv2;
printk("OPAL V2 detected !\n");
} else {
printk("OPAL V1 detected !\n");
}
+ return 1;
+}
+
+static int __init opal_register_exception_handlers(void)
+{
+ u64 glue;
+
+ if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
+ return -ENODEV;
+
/* Hookup some exception handlers. We use the fwnmi area at 0x7000
* to provide the glue space to OPAL
*/
@@ -74,9 +90,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
glue += 128;
opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
- return 1;
+ return 0;
}
+early_initcall(opal_register_exception_handlers);
+
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
s64 len, rc;
@@ -133,6 +151,13 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
len = total_len;
rc = opal_console_write(vtermno, &len, data);
+
+ /* Closed or other error drop */
+ if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
+ rc != OPAL_BUSY_EVENT) {
+ written = total_len;
+ break;
+ }
if (rc == OPAL_SUCCESS) {
total_len -= len;
data += len;
@@ -305,6 +330,8 @@ static int __init opal_init(void)
irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
pr_debug("opal: Found %d interrupts reserved for OPAL\n",
irqs ? (irqlen / 4) : 0);
+ opal_irq_count = irqlen / 4;
+ opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
unsigned int hwirq = be32_to_cpup(irqs);
unsigned int irq = irq_create_mapping(NULL, hwirq);
@@ -316,7 +343,19 @@ static int __init opal_init(void)
if (rc)
pr_warning("opal: Error %d requesting irq %d"
" (0x%x)\n", rc, irq, hwirq);
+ opal_irqs[i] = irq;
}
return 0;
}
subsys_initcall(opal_init);
+
+void opal_shutdown(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < opal_irq_count; i++) {
+ if (opal_irqs[i])
+ free_irq(opal_irqs[i], 0);
+ opal_irqs[i] = 0;
+ }
+}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 8e90e89..9c9d15e 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -26,10 +26,12 @@
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
+#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
+#include <asm/xics.h>
#include "powernv.h"
#include "pci.h"
@@ -66,16 +68,6 @@ define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);
-static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
-{
- struct device_node *np;
-
- np = pci_device_to_OF_node(dev);
- if (!np)
- return NULL;
- return PCI_DN(np);
-}
-
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
unsigned long pe;
@@ -87,6 +79,7 @@ static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
return IODA_INVALID_PE;
} while(test_and_set_bit(pe, phb->ioda.pe_alloc));
+ phb->ioda.pe_array[pe].phb = phb;
phb->ioda.pe_array[pe].pe_number = pe;
return pe;
}
@@ -107,7 +100,7 @@ static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
- struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+ struct pci_dn *pdn = pci_get_pdn(dev);
if (!pdn)
return NULL;
@@ -170,7 +163,7 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
/* Add to all parents PELT-V */
while (parent) {
- struct pci_dn *pdn = pnv_ioda_get_pdn(parent);
+ struct pci_dn *pdn = pci_get_pdn(parent);
if (pdn && pdn->pe_number != IODA_INVALID_PE) {
rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
@@ -249,7 +242,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
- struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+ struct pci_dn *pdn = pci_get_pdn(dev);
struct pnv_ioda_pe *pe;
int pe_num;
@@ -320,7 +313,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
- struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+ struct pci_dn *pdn = pci_get_pdn(dev);
if (pdn == NULL) {
pr_warn("%s: No device node associated with device !\n",
@@ -431,22 +424,102 @@ static void pnv_pci_ioda_setup_PEs(void)
}
}
-static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *dev)
+static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
- /* We delay DMA setup after we have assigned all PE# */
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ struct pnv_ioda_pe *pe;
+
+ /*
+ * The function can be called while the PE#
+ * hasn't been assigned. Do nothing for the
+ * case.
+ */
+ if (!pdn || pdn->pe_number == IODA_INVALID_PE)
+ return;
+
+ pe = &phb->ioda.pe_array[pdn->pe_number];
+ set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}
-static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
+static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
+ u64 *startp, u64 *endp)
{
- struct pci_dev *dev;
+ u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+ unsigned long start, end, inc;
+
+ start = __pa(startp);
+ end = __pa(endp);
+
+ /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
+ if (tbl->it_busno) {
+ start <<= 12;
+ end <<= 12;
+ inc = 128 << 12;
+ start |= tbl->it_busno;
+ end |= tbl->it_busno;
+ } else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
+ /* p7ioc-style invalidation, 2 TCEs per write */
+ start |= (1ull << 63);
+ end |= (1ull << 63);
+ inc = 16;
+ } else {
+ /* Default (older HW) */
+ inc = 128;
+ }
- list_for_each_entry(dev, &bus->devices, bus_list) {
- set_iommu_table_base(&dev->dev, &pe->tce32_table);
- if (dev->subordinate)
- pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+ end |= inc - 1; /* round up end to be different than start */
+
+ mb(); /* Ensure above stores are visible */
+ while (start <= end) {
+ __raw_writeq(start, invalidate);
+ start += inc;
+ }
+
+ /*
+ * The iommu layer will do another mb() for us on build()
+ * and we don't care on free()
+ */
+}
+
+static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
+ struct iommu_table *tbl,
+ u64 *startp, u64 *endp)
+{
+ unsigned long start, end, inc;
+ u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+
+ /* We'll invalidate DMA address in PE scope */
+ start = 0x2ul << 60;
+ start |= (pe->pe_number & 0xFF);
+ end = start;
+
+ /* Figure out the start, end and step */
+ inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
+ start |= (inc << 12);
+ inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
+ end |= (inc << 12);
+ inc = (0x1ul << 12);
+ mb();
+
+ while (start <= end) {
+ __raw_writeq(start, invalidate);
+ start += inc;
}
}
+void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
+ u64 *startp, u64 *endp)
+{
+ struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
+ tce32_table);
+ struct pnv_phb *phb = pe->phb;
+
+ if (phb->type == PNV_PHB_IODA1)
+ pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
+ else
+ pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
+}
+
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe, unsigned int base,
unsigned int segs)
@@ -518,16 +591,11 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
*/
tbl->it_busno = 0;
tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
- tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE
- | TCE_PCI_SWINV_PAIR;
+ tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |
+ TCE_PCI_SWINV_PAIR;
}
iommu_init_table(tbl, phb->hose->node);
- if (pe->pdev)
- set_iommu_table_base(&pe->pdev->dev, tbl);
- else
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
-
return;
fail:
/* XXX Failure: Try to fallback to 64-bit only ? */
@@ -537,6 +605,76 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
+static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe)
+{
+ struct page *tce_mem = NULL;
+ void *addr;
+ const __be64 *swinvp;
+ struct iommu_table *tbl;
+ unsigned int tce_table_size, end;
+ int64_t rc;
+
+ /* We shouldn't already have a 32-bit DMA associated */
+ if (WARN_ON(pe->tce32_seg >= 0))
+ return;
+
+ /* The PE will reserve all possible 32-bits space */
+ pe->tce32_seg = 0;
+ end = (1 << ilog2(phb->ioda.m32_pci_base));
+ tce_table_size = (end / 0x1000) * 8;
+ pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
+ end);
+
+ /* Allocate TCE table */
+ tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
+ get_order(tce_table_size));
+ if (!tce_mem) {
+ pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
+ goto fail;
+ }
+ addr = page_address(tce_mem);
+ memset(addr, 0, tce_table_size);
+
+ /*
+ * Map TCE table through TVT. The TVE index is the PE number
+ * shifted by 1 bit for 32-bits DMA space.
+ */
+ rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
+ pe->pe_number << 1, 1, __pa(addr),
+ tce_table_size, 0x1000);
+ if (rc) {
+ pe_err(pe, "Failed to configure 32-bit TCE table,"
+ " err %ld\n", rc);
+ goto fail;
+ }
+
+ /* Setup linux iommu table */
+ tbl = &pe->tce32_table;
+ pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0);
+
+ /* OPAL variant of PHB3 invalidated TCEs */
+ swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
+ if (swinvp) {
+ /* We need a couple more fields -- an address and a data
+ * to or. Since the bus is only printed out on table free
+ * errors, and on the first pass the data will be a relative
+ * bus number, print that out instead.
+ */
+ tbl->it_busno = 0;
+ tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
+ tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
+ }
+ iommu_init_table(tbl, phb->hose->node);
+
+ return;
+fail:
+ if (pe->tce32_seg >= 0)
+ pe->tce32_seg = -1;
+ if (tce_mem)
+ __free_pages(tce_mem, get_order(tce_table_size));
+}
+
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
struct pci_controller *hose = phb->hose;
@@ -579,20 +717,50 @@ static void pnv_ioda_setup_dma(struct pnv_phb *phb)
if (segs > remaining)
segs = remaining;
}
- pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
- pe->dma_weight, segs);
- pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
+
+ /*
+ * For IODA2 compliant PHB3, we needn't care about the weight.
+ * The all available 32-bits DMA space will be assigned to
+ * the specific PE.
+ */
+ if (phb->type == PNV_PHB_IODA1) {
+ pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
+ pe->dma_weight, segs);
+ pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
+ } else {
+ pe_info(pe, "Assign DMA32 space\n");
+ segs = 0;
+ pnv_pci_ioda2_setup_dma_pe(phb, pe);
+ }
+
remaining -= segs;
base += segs;
}
}
#ifdef CONFIG_PCI_MSI
+static void pnv_ioda2_msi_eoi(struct irq_data *d)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ struct irq_chip *chip = irq_data_get_irq_chip(d);
+ struct pnv_phb *phb = container_of(chip, struct pnv_phb,
+ ioda.irq_chip);
+ int64_t rc;
+
+ rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
+ WARN_ON_ONCE(rc);
+
+ icp_native_eoi(d);
+}
+
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
- unsigned int hwirq, unsigned int is_64,
- struct msi_msg *msg)
+ unsigned int hwirq, unsigned int virq,
+ unsigned int is_64, struct msi_msg *msg)
{
struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
+ struct pci_dn *pdn = pci_get_pdn(dev);
+ struct irq_data *idata;
+ struct irq_chip *ichip;
unsigned int xive_num = hwirq - phb->msi_base;
uint64_t addr64;
uint32_t addr32, data;
@@ -606,6 +774,10 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
if (pe->mve_number < 0)
return -ENXIO;
+ /* Force 32-bit MSI on some broken devices */
+ if (pdn && pdn->force_32bit_msi)
+ is_64 = 0;
+
/* Assign XIVE to PE */
rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
if (rc) {
@@ -637,6 +809,23 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
}
msg->data = data;
+ /*
+ * Change the IRQ chip for the MSI interrupts on PHB3.
+ * The corresponding IRQ chip should be populated for
+ * the first time.
+ */
+ if (phb->type == PNV_PHB_IODA2) {
+ if (!phb->ioda.irq_chip_init) {
+ idata = irq_get_irq_data(virq);
+ ichip = irq_data_get_irq_chip(idata);
+ phb->ioda.irq_chip_init = 1;
+ phb->ioda.irq_chip = *ichip;
+ phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
+ }
+
+ irq_set_chip(virq, &phb->ioda.irq_chip);
+ }
+
pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
" address=%x_%08x data=%x PE# %d\n",
pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
@@ -647,7 +836,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
- unsigned int bmap_size;
+ unsigned int count;
const __be32 *prop = of_get_property(phb->hose->dn,
"ibm,opal-msi-ranges", NULL);
if (!prop) {
@@ -658,18 +847,17 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
return;
phb->msi_base = be32_to_cpup(prop);
- phb->msi_count = be32_to_cpup(prop + 1);
- bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
- phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
- if (!phb->msi_map) {
+ count = be32_to_cpup(prop + 1);
+ if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
phb->hose->global_number);
return;
}
+
phb->msi_setup = pnv_pci_ioda_msi_setup;
phb->msi32_support = 1;
pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
- phb->msi_count, phb->msi_base);
+ count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
@@ -722,11 +910,14 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
index++;
}
} else if (res->flags & IORESOURCE_MEM) {
+ /* WARNING: Assumes M32 is mem region 0 in PHB. We need to
+ * harden that algorithm when we start supporting M64
+ */
region.start = res->start -
- hose->pci_mem_offset -
+ hose->mem_offset[0] -
phb->ioda.m32_pci_base;
region.end = res->end -
- hose->pci_mem_offset -
+ hose->mem_offset[0] -
phb->ioda.m32_pci_base;
index = region.start / phb->ioda.m32_segsize;
@@ -839,7 +1030,7 @@ static int pnv_pci_enable_device_hook(struct pci_dev *dev)
if (!phb->initialized)
return 0;
- pdn = pnv_ioda_get_pdn(dev);
+ pdn = pci_get_pdn(dev);
if (!pdn || pdn->pe_number == IODA_INVALID_PE)
return -EINVAL;
@@ -852,18 +1043,25 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}
-void __init pnv_pci_init_ioda1_phb(struct device_node *np)
+static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
+{
+ opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
+ OPAL_ASSERT_RESET);
+}
+
+void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
{
struct pci_controller *hose;
static int primary = 1;
struct pnv_phb *phb;
unsigned long size, m32map_off, iomap_off, pemap_off;
const u64 *prop64;
+ const u32 *prop32;
u64 phb_id;
void *aux;
long rc;
- pr_info(" Initializing IODA OPAL PHB %s\n", np->full_name);
+ pr_info(" Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
if (!prop64) {
@@ -890,47 +1088,46 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
hose->last_busno = 0xff;
hose->private_data = phb;
phb->opal_id = phb_id;
- phb->type = PNV_PHB_IODA1;
+ phb->type = ioda_type;
/* Detect specific models for error handling */
if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
phb->model = PNV_PHB_MODEL_P7IOC;
+ else if (of_device_is_compatible(np, "ibm,power8-pciex"))
+ phb->model = PNV_PHB_MODEL_PHB3;
else
phb->model = PNV_PHB_MODEL_UNKNOWN;
- /* We parse "ranges" now since we need to deduce the register base
- * from the IO base
- */
+ /* Parse 32-bit and IO ranges (if any) */
pci_process_bridge_OF_ranges(phb->hose, np, primary);
primary = 0;
- /* Magic formula from Milton */
+ /* Get registers */
phb->regs = of_iomap(np, 0);
if (phb->regs == NULL)
pr_err(" Failed to map registers !\n");
-
- /* XXX This is hack-a-thon. This needs to be changed so that:
- * - we obtain stuff like PE# etc... from device-tree
- * - we properly re-allocate M32 ourselves
- * (the OFW one isn't very good)
- */
-
/* Initialize more IODA stuff */
- phb->ioda.total_pe = 128;
+ prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
+ if (!prop32)
+ phb->ioda.total_pe = 1;
+ else
+ phb->ioda.total_pe = *prop32;
phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
- /* OFW Has already off top 64k of M32 space (MSI space) */
+ /* FW Has already off top 64k of M32 space (MSI space) */
phb->ioda.m32_size += 0x10000;
phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
- phb->ioda.m32_pci_base = hose->mem_resources[0].start -
- hose->pci_mem_offset;
+ phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
phb->ioda.io_size = hose->pci_io_size;
phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
- /* Allocate aux data & arrays */
+ /* Allocate aux data & arrays
+ *
+ * XXX TODO: Don't allocate io segmap on PHB3
+ */
size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
m32map_off = size;
size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
@@ -960,7 +1157,7 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
hose->mem_resources[2].start = 0;
hose->mem_resources[2].end = 0;
-#if 0
+#if 0 /* We should really do that ... */
rc = opal_pci_set_phb_mem_window(opal->phb_id,
window_type,
window_num,
@@ -974,16 +1171,6 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
phb->ioda.m32_size, phb->ioda.m32_segsize,
phb->ioda.io_size, phb->ioda.io_segsize);
- if (phb->regs) {
- pr_devel(" BUID = 0x%016llx\n", in_be64(phb->regs + 0x100));
- pr_devel(" PHB2_CR = 0x%016llx\n", in_be64(phb->regs + 0x160));
- pr_devel(" IO_BAR = 0x%016llx\n", in_be64(phb->regs + 0x170));
- pr_devel(" IO_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x178));
- pr_devel(" IO_SAR = 0x%016llx\n", in_be64(phb->regs + 0x180));
- pr_devel(" M32_BAR = 0x%016llx\n", in_be64(phb->regs + 0x190));
- pr_devel(" M32_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x198));
- pr_devel(" M32_SAR = 0x%016llx\n", in_be64(phb->regs + 0x1a0));
- }
phb->hose->ops = &pnv_pci_ops;
/* Setup RID -> PE mapping function */
@@ -992,6 +1179,9 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
/* Setup TCEs */
phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+ /* Setup shutdown function for kexec */
+ phb->shutdown = pnv_pci_ioda_shutdown;
+
/* Setup MSI support */
pnv_pci_init_ioda_msis(phb);
@@ -1011,7 +1201,18 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
if (rc)
pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
- opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE);
+
+ /*
+ * On IODA1 map everything to PE#0, on IODA2 we assume the IODA reset
+ * has cleared the RTT which has the same effect
+ */
+ if (ioda_type == PNV_PHB_IODA1)
+		opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1, OPAL_MAP_PE);
+}
+
+void pnv_pci_init_ioda2_phb(struct device_node *np)
+{
+ pnv_pci_init_ioda_phb(np, PNV_PHB_IODA2);
}
void __init pnv_pci_init_ioda_hub(struct device_node *np)
@@ -1034,6 +1235,6 @@ void __init pnv_pci_init_ioda_hub(struct device_node *np)
for_each_child_of_node(np, phbn) {
/* Look for IODA1 PHBs */
if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
- pnv_pci_init_ioda1_phb(phbn);
+ pnv_pci_init_ioda_phb(phbn, PNV_PHB_IODA1);
}
}
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 7db8771..92b37a0 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -26,6 +26,7 @@
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
+#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
@@ -41,8 +42,8 @@
#ifdef CONFIG_PCI_MSI
static int pnv_pci_p5ioc2_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
- unsigned int hwirq, unsigned int is_64,
- struct msi_msg *msg)
+ unsigned int hwirq, unsigned int virq,
+ unsigned int is_64, struct msi_msg *msg)
{
if (WARN_ON(!is_64))
return -ENXIO;
@@ -55,7 +56,7 @@ static int pnv_pci_p5ioc2_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
{
- unsigned int bmap_size;
+ unsigned int count;
const __be32 *prop = of_get_property(phb->hose->dn,
"ibm,opal-msi-ranges", NULL);
if (!prop)
@@ -67,10 +68,8 @@ static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
if (of_device_is_compatible(phb->hose->dn, "ibm,p5ioc2-pcix"))
return;
phb->msi_base = be32_to_cpup(prop);
- phb->msi_count = be32_to_cpup(prop + 1);
- bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
- phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
- if (!phb->msi_map) {
+ count = be32_to_cpup(prop + 1);
+ if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
phb->hose->global_number);
return;
@@ -78,7 +77,7 @@ static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
phb->msi_setup = pnv_pci_p5ioc2_msi_setup;
phb->msi32_support = 0;
pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
- phb->msi_count, phb->msi_base);
+ count, phb->msi_base);
}
#else
static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb) { }
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b8b8e0b..277343c 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -26,6 +26,7 @@
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
+#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
@@ -46,44 +47,12 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
+ struct pci_dn *pdn = pci_get_pdn(pdev);
- return (phb && phb->msi_map) ? 0 : -ENODEV;
-}
-
-static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
-{
- unsigned long flags;
- unsigned int id, rc;
-
- spin_lock_irqsave(&phb->lock, flags);
-
- id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
- if (id >= phb->msi_count && phb->msi_next)
- id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
- if (id >= phb->msi_count) {
- rc = 0;
- goto out;
- }
- __set_bit(id, phb->msi_map);
- rc = id + phb->msi_base;
-out:
- spin_unlock_irqrestore(&phb->lock, flags);
- return rc;
-}
-
-static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
-{
- unsigned long flags;
- unsigned int id;
-
- if (WARN_ON(hwirq < phb->msi_base ||
- hwirq >= (phb->msi_base + phb->msi_count)))
- return;
- id = hwirq - phb->msi_base;
+ if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
+ return -ENODEV;
- spin_lock_irqsave(&phb->lock, flags);
- __clear_bit(id, phb->msi_map);
- spin_unlock_irqrestore(&phb->lock, flags);
+ return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
}
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -92,7 +61,8 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
struct pnv_phb *phb = hose->private_data;
struct msi_desc *entry;
struct msi_msg msg;
- unsigned int hwirq, virq;
+ int hwirq;
+ unsigned int virq;
int rc;
if (WARN_ON(!phb))
@@ -104,25 +74,25 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
pci_name(pdev));
return -ENXIO;
}
- hwirq = pnv_get_one_msi(phb);
- if (!hwirq) {
+ hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
+ if (hwirq < 0) {
pr_warn("%s: Failed to find a free MSI\n",
pci_name(pdev));
return -ENOSPC;
}
- virq = irq_create_mapping(NULL, hwirq);
+ virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
if (virq == NO_IRQ) {
pr_warn("%s: Failed to map MSI to linux irq\n",
pci_name(pdev));
- pnv_put_msi(phb, hwirq);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
return -ENOMEM;
}
- rc = phb->msi_setup(phb, pdev, hwirq, entry->msi_attrib.is_64,
- &msg);
+ rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
+ virq, entry->msi_attrib.is_64, &msg);
if (rc) {
pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
irq_dispose_mapping(virq);
- pnv_put_msi(phb, hwirq);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
return rc;
}
irq_set_msi_desc(virq, entry);
@@ -144,7 +114,8 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;
irq_set_msi_desc(entry->irq, NULL);
- pnv_put_msi(phb, virq_to_hw(entry->irq));
+ msi_bitmap_free_hwirqs(&phb->msi_bmp,
+ virq_to_hw(entry->irq) - phb->msi_base, 1);
irq_dispose_mapping(entry->irq);
}
}
@@ -362,48 +333,6 @@ struct pci_ops pnv_pci_ops = {
.write = pnv_pci_write_config,
};
-
-static void pnv_tce_invalidate(struct iommu_table *tbl,
- u64 *startp, u64 *endp)
-{
- u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
- unsigned long start, end, inc;
-
- start = __pa(startp);
- end = __pa(endp);
-
-
- /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
- if (tbl->it_busno) {
- start <<= 12;
- end <<= 12;
- inc = 128 << 12;
- start |= tbl->it_busno;
- end |= tbl->it_busno;
- }
- /* p7ioc-style invalidation, 2 TCEs per write */
- else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
- start |= (1ull << 63);
- end |= (1ull << 63);
- inc = 16;
- }
- /* Default (older HW) */
- else
- inc = 128;
-
- end |= inc - 1; /* round up end to be different than start */
-
- mb(); /* Ensure above stores are visible */
- while (start <= end) {
- __raw_writeq(start, invalidate);
- start += inc;
- }
- /* The iommu layer will do another mb() for us on build() and
- * we don't care on free()
- */
-}
-
-
static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
struct dma_attrs *attrs)
@@ -428,7 +357,7 @@ static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
* of flags if that becomes the case
*/
if (tbl->it_type & TCE_PCI_SWINV_CREATE)
- pnv_tce_invalidate(tbl, tces, tcep - 1);
+ pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
return 0;
}
@@ -443,7 +372,7 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
*(tcep++) = 0;
if (tbl->it_type & TCE_PCI_SWINV_FREE)
- pnv_tce_invalidate(tbl, tces, tcep - 1);
+ pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
}
static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
@@ -525,7 +454,19 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
pnv_pci_dma_fallback_setup(hose, pdev);
}
-/* Fixup wrong class code in p7ioc root complex */
+void pnv_pci_shutdown(void)
+{
+ struct pci_controller *hose;
+
+ list_for_each_entry(hose, &hose_list, list_node) {
+ struct pnv_phb *phb = hose->private_data;
+
+ if (phb && phb->shutdown)
+ phb->shutdown(phb);
+ }
+}
+
+/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
@@ -591,6 +532,10 @@ void __init pnv_pci_init(void)
if (!found_ioda)
for_each_compatible_node(np, NULL, "ibm,p5ioc2")
pnv_pci_init_p5ioc2_hub(np);
+
+ /* Look for ioda2 built-in PHB3's */
+ for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
+ pnv_pci_init_ioda2_phb(np);
}
/* Setup the linkage between OF nodes and PHBs */
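The pci.c hunks above move the powernv MSI path from an open-coded bitmap in struct pnv_phb to the generic msi_bitmap allocator; hardware IRQ numbers are now handled as offsets from phb->msi_base rather than absolute values. A minimal sketch of the resulting allocate/map/free pairing, using only the calls visible in the diff (error paths trimmed):

	int hwirq;
	unsigned int virq;

	hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);	/* offset within the PHB's MSI range */
	if (hwirq < 0)
		return -ENOSPC;
	virq = irq_create_mapping(NULL, phb->msi_base + hwirq);	/* map the absolute hwirq to a Linux irq */
	if (virq == NO_IRQ) {
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);	/* free the offset, not the absolute number */
		return -ENOMEM;
	}
	/* ... phb->msi_setup(phb, pdev, phb->msi_base + hwirq, virq, is_64, &msg) ... */

	/* teardown converts back to an offset before freeing */
	msi_bitmap_free_hwirqs(&phb->msi_bmp, virq_to_hw(virq) - phb->msi_base, 1);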
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 7cfb7c8..25d76c4 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -4,9 +4,9 @@
struct pci_dn;
enum pnv_phb_type {
- PNV_PHB_P5IOC2,
- PNV_PHB_IODA1,
- PNV_PHB_IODA2,
+ PNV_PHB_P5IOC2 = 0,
+ PNV_PHB_IODA1 = 1,
+ PNV_PHB_IODA2 = 2,
};
/* Precise PHB model for error management */
@@ -14,6 +14,7 @@ enum pnv_phb_model {
PNV_PHB_MODEL_UNKNOWN,
PNV_PHB_MODEL_P5IOC2,
PNV_PHB_MODEL_P7IOC,
+ PNV_PHB_MODEL_PHB3,
};
#define PNV_PCI_DIAG_BUF_SIZE 4096
@@ -22,8 +23,10 @@ enum pnv_phb_model {
#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
/* Data associated with a PE, including IOMMU tracking etc.. */
+struct pnv_phb;
struct pnv_ioda_pe {
unsigned long flags;
+ struct pnv_phb *phb;
/* A PE can be associated with a single device or an
* entire bus (& children). In the former case, pdev
@@ -73,18 +76,17 @@ struct pnv_phb {
spinlock_t lock;
#ifdef CONFIG_PCI_MSI
- unsigned long *msi_map;
unsigned int msi_base;
- unsigned int msi_count;
- unsigned int msi_next;
unsigned int msi32_support;
+ struct msi_bitmap msi_bmp;
#endif
int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
- unsigned int hwirq, unsigned int is_64,
- struct msi_msg *msg);
+ unsigned int hwirq, unsigned int virq,
+ unsigned int is_64, struct msi_msg *msg);
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
void (*fixup_phb)(struct pci_controller *hose);
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
+ void (*shutdown)(struct pnv_phb *phb);
union {
struct {
@@ -109,6 +111,10 @@ struct pnv_phb {
unsigned int *io_segmap;
struct pnv_ioda_pe *pe_array;
+ /* IRQ chip */
+ int irq_chip_init;
+ struct irq_chip irq_chip;
+
/* Sorted list of used PE's based
* on the sequence of creation
*/
@@ -150,6 +156,8 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
u64 dma_offset);
extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
extern void pnv_pci_init_ioda_hub(struct device_node *np);
-
+extern void pnv_pci_init_ioda2_phb(struct device_node *np);
+extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
+ u64 *startp, u64 *endp);
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 8a9df7f..a1c6f83 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -9,8 +9,10 @@ static inline void pnv_smp_init(void) { }
#ifdef CONFIG_PCI
extern void pnv_pci_init(void);
+extern void pnv_pci_shutdown(void);
#else
static inline void pnv_pci_init(void) { }
+static inline void pnv_pci_shutdown(void) { }
#endif
#endif /* _POWERNV_H */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index db1ad1c..d4459bf 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -78,7 +78,9 @@ static void pnv_show_cpuinfo(struct seq_file *m)
if (root)
model = of_get_property(root, "model", NULL);
seq_printf(m, "machine\t\t: PowerNV %s\n", model);
- if (firmware_has_feature(FW_FEATURE_OPALv2))
+ if (firmware_has_feature(FW_FEATURE_OPALv3))
+ seq_printf(m, "firmware\t: OPAL v3\n");
+ else if (firmware_has_feature(FW_FEATURE_OPALv2))
seq_printf(m, "firmware\t: OPAL v2\n");
else if (firmware_has_feature(FW_FEATURE_OPAL))
seq_printf(m, "firmware\t: OPAL v1\n");
@@ -126,6 +128,17 @@ static void pnv_progress(char *s, unsigned short hex)
{
}
+static void pnv_shutdown(void)
+{
+ /* Let the PCI code clear up IODA tables */
+ pnv_pci_shutdown();
+
+ /* And unregister all OPAL interrupts so they don't fire
+ * up while we kexec
+ */
+ opal_shutdown();
+}
+
#ifdef CONFIG_KEXEC
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
@@ -187,6 +200,7 @@ define_machine(powernv) {
.init_IRQ = pnv_init_IRQ,
.show_cpuinfo = pnv_show_cpuinfo,
.progress = pnv_progress,
+ .machine_shutdown = pnv_shutdown,
.power_save = power7_idle,
.calibrate_decr = generic_calibrate_decr,
#ifdef CONFIG_KEXEC
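The powernv.h, pci.c and setup.c hunks together give kexec a proper shutdown path: machine_shutdown now points at pnv_shutdown(), which quiesces the PHBs before OPAL interrupts are unregistered. A condensed view of that call chain, summarizing only the functions added above:

	/* kexec: ppc_md.machine_shutdown()
	 *   -> pnv_shutdown()
	 *        -> pnv_pci_shutdown()             for each hose: phb->shutdown(phb)
	 *             -> pnv_pci_ioda_shutdown()   OPAL_PCI_IODA_TABLE_RESET on that PHB
	 *        -> opal_shutdown()                unregister OPAL interrupts before kexec
	 */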
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index c916432..8cbaa59 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -55,16 +55,68 @@ int pnv_smp_kick_cpu(int nr)
BUG_ON(nr < 0 || nr >= NR_CPUS);
- /* On OPAL v2 the CPU are still spinning inside OPAL itself,
- * get them back now
+ /*
+ * If we already started or OPALv2 is not supported, we just
+ * kick the CPU via the PACA
*/
- if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) {
- pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
- rc = opal_start_cpu(pcpu, start_here);
- if (rc != OPAL_SUCCESS)
- pr_warn("OPAL Error %ld starting CPU %d\n",
+ if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
+ goto kick;
+
+ /*
+ * At this point, the CPU can either be spinning on the way in
+ * from kexec or be inside OPAL waiting to be started for the
+ * first time. OPAL v3 allows us to query OPAL to know if it
+ * has the CPUs, so we do that
+ */
+ if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+ uint8_t status;
+
+ rc = opal_query_cpu_status(pcpu, &status);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("OPAL Error %ld querying CPU %d state\n",
rc, nr);
+ return -ENODEV;
+ }
+
+ /*
+ * Already started, just kick it, probably coming from
+ * kexec and spinning
+ */
+ if (status == OPAL_THREAD_STARTED)
+ goto kick;
+
+ /*
+ * Available/inactive, let's kick it
+ */
+ if (status == OPAL_THREAD_INACTIVE) {
+ pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
+ nr, pcpu);
+ rc = opal_start_cpu(pcpu, start_here);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("OPAL Error %ld starting CPU %d\n",
+ rc, nr);
+ return -ENODEV;
+ }
+ } else {
+ /*
+			 * An unavailable CPU (or any other unknown status)
+			 * shouldn't be started. It also shouldn't be in the
+			 * possible map, but currently that can happen
+ */
+ pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
+ " (status %d)...\n", nr, pcpu, status);
+ return -ENODEV;
+ }
+ } else {
+ /*
+		 * On OPAL v2, we just kick it and hope for the best;
+		 * we must not test the error from opal_start_cpu() or
+ * we would fail to get CPUs from kexec.
+ */
+ opal_start_cpu(pcpu, start_here);
}
+ kick:
return smp_generic_kick_cpu(nr);
}
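The rewritten pnv_smp_kick_cpu() reduces to a small decision table on the CPU's OPAL status; a condensed summary of the branches above (no behaviour beyond what the code shows):

	/* pnv_smp_kick_cpu(nr), when OPALv2+ firmware is present:
	 *   cpu_start already set              -> just smp_generic_kick_cpu()
	 *   OPALv3, OPAL_THREAD_STARTED        -> spinning from kexec, just kick it
	 *   OPALv3, OPAL_THREAD_INACTIVE       -> opal_start_cpu(), then kick
	 *   OPALv3, any other status           -> refuse with -ENODEV
	 *   OPALv2 (no status query available) -> opal_start_cpu() ignoring errors, then kick
	 */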
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig
deleted file mode 100644
index 1547f66..0000000
--- a/arch/powerpc/platforms/prep/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
-config PPC_PREP
- bool "PowerPC Reference Platform (PReP) based machines"
- depends on 6xx && BROKEN
- select HAVE_PCSPKR_PLATFORM
- select MPIC
- select PPC_I8259
- select PPC_INDIRECT_PCI
- select PPC_UDBG_16550
- select PPC_NATIVE
- default n
-
-config PREP_RESIDUAL
- bool "Support for PReP Residual Data"
- depends on PPC_PREP
- help
- Some PReP systems have residual data passed to the kernel by the
- firmware. This allows detection of memory size, devices present and
- other useful pieces of information. Sometimes this information is
- not present or incorrect, in which case it could lead to the machine
- behaving incorrectly. If this happens, either disable PREP_RESIDUAL
- or pass the 'noresidual' option to the kernel.
-
- If you are running a PReP system, say Y here, otherwise say N.
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index 46b7f02..e87c194 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -48,7 +48,7 @@ config PS3_HTAB_SIZE
system will have optimal runtime performance.
config PS3_DYNAMIC_DMA
- depends on PPC_PS3 && EXPERIMENTAL
+ depends on PPC_PS3
bool "PS3 Platform dynamic DMA page table management"
default n
help
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index d00d7b0..177a2f7 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -27,6 +27,7 @@
#include <asm/lv1call.h>
#include <asm/ps3fb.h>
+#define PS3_VERBOSE_RESULT
#include "platform.h"
/**
@@ -45,7 +46,7 @@ static DEFINE_SPINLOCK(ps3_htab_lock);
static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
unsigned long pa, unsigned long rflags, unsigned long vflags,
- int psize, int ssize)
+ int psize, int apsize, int ssize)
{
int result;
u64 hpte_v, hpte_r;
@@ -61,8 +62,8 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
*/
vflags &= ~HPTE_V_SECONDARY;
- hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
+ hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
+ hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize) | rflags;
spin_lock_irqsave(&ps3_htab_lock, flags);
@@ -75,8 +76,9 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
if (result) {
/* all entries bolted !*/
- pr_info("%s:result=%d vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
- __func__, result, vpn, pa, hpte_group, hpte_v, hpte_r);
+ pr_info("%s:result=%s vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
+ __func__, ps3_result(result), vpn, pa, hpte_group,
+ hpte_v, hpte_r);
BUG();
}
@@ -115,7 +117,7 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long flags;
long ret;
- want_v = hpte_encode_v(vpn, psize, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
spin_lock_irqsave(&ps3_htab_lock, flags);
@@ -125,8 +127,8 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
&hpte_rs);
if (result) {
- pr_info("%s: res=%d read vpn=%lx slot=%lx psize=%d\n",
- __func__, result, vpn, slot, psize);
+ pr_info("%s: result=%s read vpn=%lx slot=%lx psize=%d\n",
+ __func__, ps3_result(result), vpn, slot, psize);
BUG();
}
@@ -170,8 +172,8 @@ static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn,
result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);
if (result) {
- pr_info("%s: res=%d vpn=%lx slot=%lx psize=%d\n",
- __func__, result, vpn, slot, psize);
+ pr_info("%s: result=%s vpn=%lx slot=%lx psize=%d\n",
+ __func__, ps3_result(result), vpn, slot, psize);
BUG();
}
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
index 40b5cb4..cba1e6b 100644
--- a/arch/powerpc/platforms/ps3/time.c
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -89,10 +89,8 @@ static int __init ps3_rtc_init(void)
return -ENODEV;
pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
- return 0;
+ return PTR_RET(pdev);
}
module_init(ps3_rtc_init);
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 837cf49..4459eff 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -17,6 +17,10 @@ config PPC_PSERIES
select PPC_NATIVE
select PPC_PCI_CHOICE if EXPERT
select ZLIB_DEFLATE
+ select PPC_DOORBELL
+ select HAVE_CONTEXT_TRACKING
+ select HOTPLUG if SMP
+ select HOTPLUG_CPU if SMP
default y
config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index a764854..0cc0ac0 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -57,7 +57,7 @@ static u8 dtl_event_mask = 0x7;
*/
static int dtl_buf_entries = N_DISPATCH_LOG;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
u64 write_index;
struct dtl_entry *write_ptr;
@@ -142,7 +142,7 @@ static u64 dtl_current_index(struct dtl *dtl)
return per_cpu(dtl_rings, dtl->cpu).write_index;
}
-#else /* CONFIG_VIRT_CPU_ACCOUNTING */
+#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int dtl_start(struct dtl *dtl)
{
@@ -188,7 +188,7 @@ static u64 dtl_current_index(struct dtl *dtl)
{
return lppaca_of(dtl->cpu).dtl_idx;
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int dtl_enable(struct dtl *dtl)
{
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 9a04322..6b73d6c 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -788,7 +788,6 @@ static void eeh_add_device_late(struct pci_dev *dev)
dev->dev.archdata.edev = edev;
eeh_addr_cache_insert_dev(dev);
- eeh_sysfs_add_device(dev);
}
/**
@@ -815,6 +814,29 @@ void eeh_add_device_tree_late(struct pci_bus *bus)
EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
/**
+ * eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus
+ * @bus: PCI bus
+ *
+ * This routine must be used to add EEH sysfs files for PCI
+ * devices which are attached to the indicated PCI bus. The PCI bus
+ * is added after system boot through hotplug or dlpar.
+ */
+void eeh_add_sysfs_files(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ eeh_sysfs_add_device(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ struct pci_bus *subbus = dev->subordinate;
+ if (subbus)
+ eeh_add_sysfs_files(subbus);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
+
+/**
* eeh_remove_device - Undo EEH setup for the indicated pci device
* @dev: pci device to be removed
* @purge_pe: remove the PE or not
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index 5a4c879..5ce3ba7 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -294,8 +294,6 @@ void __init eeh_addr_cache_build(void)
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
for_each_pci_dev(dev) {
- eeh_addr_cache_insert_dev(dev);
-
dn = pci_device_to_OF_node(dev);
if (!dn)
continue;
@@ -308,6 +306,8 @@ void __init eeh_addr_cache_build(void)
dev->dev.archdata.edev = edev;
edev->pdev = dev;
+ eeh_addr_cache_insert_dev(dev);
+
eeh_sysfs_add_device(dev);
}
diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c
index fe43d1a..9d4a9e8 100644
--- a/arch/powerpc/platforms/pseries/eeh_pe.c
+++ b/arch/powerpc/platforms/pseries/eeh_pe.c
@@ -639,7 +639,8 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
if (pe->type & EEH_PE_PHB) {
bus = pe->phb->bus;
- } else if (pe->type & EEH_PE_BUS) {
+ } else if (pe->type & EEH_PE_BUS ||
+ pe->type & EEH_PE_DEVICE) {
edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
pdev = eeh_dev_to_pci_dev(edev);
if (pdev)
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 19506f9..b456b15 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -83,7 +83,11 @@ static int pseries_eeh_init(void)
ibm_configure_pe = rtas_token("ibm,configure-pe");
ibm_configure_bridge = rtas_token("ibm,configure-bridge");
- /* necessary sanity check */
+ /*
+	 * Necessary sanity check. We needn't check "get-config-addr-info"
+	 * and its variant since old firmware probably supports addressing
+	 * by domain/bus/slot/function for EEH RTAS operations.
+ */
if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
__func__);
@@ -102,12 +106,6 @@ static int pseries_eeh_init(void)
pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
__func__);
return -EINVAL;
- } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
- ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
- pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
- "<ibm,get-config-addr-info> invalid\n",
- __func__);
- return -EINVAL;
} else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,configure-pe> and "
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 7b56118..8c80588 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -28,13 +28,18 @@
#include "pseries.h"
-typedef struct {
+struct hypertas_fw_feature {
unsigned long val;
char * name;
-} firmware_feature_t;
+};
-static __initdata firmware_feature_t
-firmware_features_table[FIRMWARE_MAX_FEATURES] = {
+/*
+ * The names in this table match names in rtas/ibm,hypertas-functions. If the
+ * entry ends in a '*', only up to the '*' is matched. Otherwise the entire
+ * string must match.
+ */
+static __initdata struct hypertas_fw_feature
+hypertas_fw_features_table[] = {
{FW_FEATURE_PFT, "hcall-pft"},
{FW_FEATURE_TCE, "hcall-tce"},
{FW_FEATURE_SPRG0, "hcall-sprg0"},
@@ -57,32 +62,72 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = {
{FW_FEATURE_SPLPAR, "hcall-splpar"},
{FW_FEATURE_VPHN, "hcall-vphn"},
{FW_FEATURE_SET_MODE, "hcall-set-mode"},
+ {FW_FEATURE_BEST_ENERGY, "hcall-best-energy-1*"},
};
/* Build up the firmware features bitmask using the contents of
* device-tree/ibm,hypertas-functions. Ultimately this functionality may
* be moved into prom.c prom_init().
*/
-void __init fw_feature_init(const char *hypertas, unsigned long len)
+void __init fw_hypertas_feature_init(const char *hypertas, unsigned long len)
{
const char *s;
int i;
- pr_debug(" -> fw_feature_init()\n");
+ pr_debug(" -> fw_hypertas_feature_init()\n");
for (s = hypertas; s < hypertas + len; s += strlen(s) + 1) {
- for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
- /* check value against table of strings */
- if (!firmware_features_table[i].name ||
- strcmp(firmware_features_table[i].name, s))
+ for (i = 0; i < ARRAY_SIZE(hypertas_fw_features_table); i++) {
+ const char *name = hypertas_fw_features_table[i].name;
+ size_t size;
+
+ /*
+			 * If there is a '*' at the end of the name, only check
+			 * up to there
+ */
+ size = strlen(name);
+ if (size && name[size - 1] == '*') {
+ if (strncmp(name, s, size - 1))
+ continue;
+ } else if (strcmp(name, s))
continue;
/* we have a match */
powerpc_firmware_features |=
- firmware_features_table[i].val;
+ hypertas_fw_features_table[i].val;
break;
}
}
- pr_debug(" <- fw_feature_init()\n");
+ pr_debug(" <- fw_hypertas_feature_init()\n");
+}
+
+struct vec5_fw_feature {
+ unsigned long val;
+ unsigned int feature;
+};
+
+static __initdata struct vec5_fw_feature
+vec5_fw_features_table[] = {
+ {FW_FEATURE_TYPE1_AFFINITY, OV5_TYPE1_AFFINITY},
+ {FW_FEATURE_PRRN, OV5_PRRN},
+};
+
+void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
+{
+ unsigned int index, feat;
+ int i;
+
+ pr_debug(" -> fw_vec5_feature_init()\n");
+
+ for (i = 0; i < ARRAY_SIZE(vec5_fw_features_table); i++) {
+ index = OV5_INDX(vec5_fw_features_table[i].feature);
+ feat = OV5_FEAT(vec5_fw_features_table[i].feature);
+
+ if (vec5[index] & feat)
+ powerpc_firmware_features |=
+ vec5_fw_features_table[i].val;
+ }
+
+ pr_debug(" <- fw_vec5_feature_init()\n");
}
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a389562..217ca5c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -127,9 +127,16 @@ static void pseries_mach_cpu_die(void)
get_lppaca()->donate_dedicated_cpu = 1;
while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
+ while (!prep_irq_for_idle()) {
+ local_irq_enable();
+ local_irq_disable();
+ }
+
extended_cede_processor(cede_latency_hint);
}
+ local_irq_disable();
+
if (!get_lppaca()->shared_proc)
get_lppaca()->donate_dedicated_cpu = 0;
get_lppaca()->idle = 0;
@@ -137,6 +144,7 @@ static void pseries_mach_cpu_die(void)
if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
unregister_slb_shadow(hwcpu);
+ hard_irq_disable();
/*
* Call to start_secondary_resume() will not return.
* Kernel stack will be reset and start_secondary()
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2372c60..9a432de 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -72,6 +72,7 @@ unsigned long memory_block_size_bytes(void)
return get_memblock_size();
}
+#ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
unsigned long start, start_pfn;
@@ -153,6 +154,17 @@ static int pseries_remove_memory(struct device_node *np)
ret = pseries_remove_memblock(base, lmb_size);
return ret;
}
+#else
+static inline int pseries_remove_memblock(unsigned long base,
+ unsigned int memblock_size)
+{
+ return -EOPNOTSUPP;
+}
+static inline int pseries_remove_memory(struct device_node *np)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
static int pseries_add_memory(struct device_node *np)
{
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index c9311cf..cf4e773 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -86,7 +86,7 @@ static int hcall_inst_seq_open(struct inode *inode, struct file *file)
rc = seq_open(file, &hcall_inst_seq_ops);
seq = file->private_data;
- seq->private = file->f_path.dentry->d_inode->i_private;
+ seq->private = file_inode(file)->i_private;
return rc;
}
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index fcf4b4c..4557e91 100644
--- a/arch/powerpc/platforms/pseries/hvcserver.c
+++ b/arch/powerpc/platforms/pseries/hvcserver.c
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <asm/hvcall.h>
#include <asm/hvcserver.h>
@@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
= (unsigned int)last_p_partition_ID;
/* copy the Null-term char too */
- strncpy(&next_partner_info->location_code[0],
+ strlcpy(&next_partner_info->location_code[0],
(char *)&pi_buff[2],
- strlen((char *)&pi_buff[2]) + 1);
+ sizeof(next_partner_info->location_code));
list_add_tail(&(next_partner_info->node), head);
next_partner_info = NULL;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index e2685ba..86ae364 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -382,6 +382,7 @@ static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
dma_offset,
0, limit);
+ next += limit * tce_size;
num_tce -= limit;
} while (num_tce > 0 && !rc);
@@ -786,33 +787,68 @@ static u64 find_existing_ddw(struct device_node *pdn)
return dma_addr;
}
+static void __restore_default_window(struct eeh_dev *edev,
+ u32 ddw_restore_token)
+{
+ u32 cfg_addr;
+ u64 buid;
+ int ret;
+
+ /*
+ * Get the config address and phb buid of the PE window.
+ * Rely on eeh to retrieve this for us.
+ * Retrieve them from the pci device, not the node with the
+ * dma-window property
+ */
+ cfg_addr = edev->config_addr;
+ if (edev->pe_config_addr)
+ cfg_addr = edev->pe_config_addr;
+ buid = edev->phb->buid;
+
+ do {
+ ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
+ BUID_HI(buid), BUID_LO(buid));
+ } while (rtas_busy_delay(ret));
+ pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
+ ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
+}
+
static int find_existing_ddw_windows(void)
{
- int len;
struct device_node *pdn;
- struct direct_window *window;
const struct dynamic_dma_window_prop *direct64;
+ const u32 *ddw_extensions;
if (!firmware_has_feature(FW_FEATURE_LPAR))
return 0;
for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
- direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
+ direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL);
if (!direct64)
continue;
- window = kzalloc(sizeof(*window), GFP_KERNEL);
- if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
- kfree(window);
- remove_ddw(pdn);
- continue;
- }
+ /*
+ * We need to ensure the IOMMU table is active when we
+ * return from the IOMMU setup so that the common code
+ * can clear the table or find the holes. To that end,
+ * first, remove any existing DDW configuration.
+ */
+ remove_ddw(pdn);
- window->device = pdn;
- window->prop = direct64;
- spin_lock(&direct_window_list_lock);
- list_add(&window->list, &direct_window_list);
- spin_unlock(&direct_window_list_lock);
+ /*
+ * Second, if we are running on a new enough level of
+ * firmware where the restore API is present, use it to
+ * restore the 32-bit window, which was removed in
+ * create_ddw.
+ * If the API is not present, then create_ddw couldn't
+ * have removed the 32-bit window in the first place, so
+ * removing the DDW configuration should be sufficient.
+ */
+ ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions",
+ NULL);
+ if (ddw_extensions && ddw_extensions[0] > 0)
+ __restore_default_window(of_node_to_eeh_dev(pdn),
+ ddw_extensions[1]);
}
return 0;
@@ -883,33 +919,17 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
}
static void restore_default_window(struct pci_dev *dev,
- u32 ddw_restore_token, unsigned long liobn)
+ u32 ddw_restore_token)
{
- struct eeh_dev *edev;
- u32 cfg_addr;
- u64 buid;
- int ret;
+ __restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token);
+}
- /*
- * Get the config address and phb buid of the PE window.
- * Rely on eeh to retrieve this for us.
- * Retrieve them from the pci device, not the node with the
- * dma-window property
- */
- edev = pci_dev_to_eeh_dev(dev);
- cfg_addr = edev->config_addr;
- if (edev->pe_config_addr)
- cfg_addr = edev->pe_config_addr;
- buid = edev->phb->buid;
+struct failed_ddw_pdn {
+ struct device_node *pdn;
+ struct list_head list;
+};
- do {
- ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
- BUID_HI(buid), BUID_LO(buid));
- } while (rtas_busy_delay(ret));
- dev_info(&dev->dev,
- "ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
- ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
-}
+static LIST_HEAD(failed_ddw_pdn_list);
/*
* If the PE supports dynamic dma windows, and there is space for a table
@@ -938,6 +958,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
struct dynamic_dma_window_prop *ddwprop;
const void *dma_window = NULL;
unsigned long liobn, offset, size;
+ struct failed_ddw_pdn *fpdn;
mutex_lock(&direct_window_init_mutex);
@@ -946,6 +967,18 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
goto out_unlock;
/*
+ * If we already went through this for a previous function of
+ * the same device and failed, we don't want to muck with the
+ * DMA window again, as it will race with in-flight operations
+ * and can lead to EEHs. The above mutex protects access to the
+ * list.
+ */
+ list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
+ if (!strcmp(fpdn->pdn->full_name, pdn->full_name))
+ goto out_unlock;
+ }
+
+ /*
* the ibm,ddw-applicable property holds the tokens for:
* ibm,query-pe-dma-window
* ibm,create-pe-dma-window
@@ -1099,7 +1132,13 @@ out_free_prop:
out_restore_window:
if (ddw_restore_token)
- restore_default_window(dev, ddw_restore_token, liobn);
+ restore_default_window(dev, ddw_restore_token);
+
+ fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
+ if (!fpdn)
+ goto out_unlock;
+ fpdn->pdn = pdn;
+ list_add(&fpdn->list, &failed_ddw_pdn_list);
out_unlock:
mutex_unlock(&direct_window_init_mutex);
@@ -1295,6 +1334,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
switch (action) {
case OF_RECONFIG_DETACH_NODE:
+ remove_ddw(np);
if (pci && pci->iommu_table)
iommu_free_table(pci->iommu_table, np->full_name);
@@ -1307,16 +1347,6 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
}
}
spin_unlock(&direct_window_list_lock);
-
- /*
- * Because the notifier runs after isolation of the
- * slot, we are guaranteed any DMA window has already
- * been revoked and the TCEs have been marked invalid,
- * so we don't need a call to remove_ddw(np). However,
- * if an additional notifier action is added before the
- * isolate call, we should update this code for
- * completeness with such a call.
- */
break;
default:
err = NOTIFY_DONE;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 0da39fe..6d62072 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -109,7 +109,7 @@ void vpa_init(int cpu)
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
- int psize, int ssize)
+ int psize, int apsize, int ssize)
{
unsigned long lpar_rc;
unsigned long flags;
@@ -121,8 +121,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
"pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, vpn, pa, rflags, vflags, psize);
- hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize) | rflags;
+ hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
+ hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
@@ -155,7 +155,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
*/
if (unlikely(lpar_rc != H_SUCCESS)) {
if (!(vflags & HPTE_V_BOLTED))
- pr_devel(" lpar err %lu\n", lpar_rc);
+ pr_devel(" lpar err %ld\n", lpar_rc);
return -2;
}
if (!(vflags & HPTE_V_BOLTED))
@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
(0x1UL << 4), &dummy1, &dummy2);
if (lpar_rc == H_SUCCESS)
return i;
- BUG_ON(lpar_rc != H_NOT_FOUND);
+
+ /*
+ * The test for adjunct partition is performed before the
+ * ANDCOND test. H_RESOURCE may be returned, so we need to
+ * check for that as well.
+ */
+ BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
slot_offset++;
slot_offset &= 0x7;
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 6573808..3d01eee 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -37,14 +37,16 @@ struct update_props_workarea {
#define UPDATE_DT_NODE 0x02000000
#define ADD_DT_NODE 0x03000000
-static int mobility_rtas_call(int token, char *buf)
+#define MIGRATION_SCOPE (1)
+
+static int mobility_rtas_call(int token, char *buf, s32 scope)
{
int rc;
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
- rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, 1);
+ rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
@@ -123,7 +125,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
return 0;
}
-static int update_dt_node(u32 phandle)
+static int update_dt_node(u32 phandle, s32 scope)
{
struct update_props_workarea *upwa;
struct device_node *dn;
@@ -132,6 +134,7 @@ static int update_dt_node(u32 phandle)
char *prop_data;
char *rtas_buf;
int update_properties_token;
+ u32 vd;
update_properties_token = rtas_token("ibm,update-properties");
if (update_properties_token == RTAS_UNKNOWN_SERVICE)
@@ -151,19 +154,31 @@ static int update_dt_node(u32 phandle)
upwa->phandle = phandle;
do {
- rc = mobility_rtas_call(update_properties_token, rtas_buf);
+ rc = mobility_rtas_call(update_properties_token, rtas_buf,
+ scope);
if (rc < 0)
break;
prop_data = rtas_buf + sizeof(*upwa);
- for (i = 0; i < upwa->nprops; i++) {
+ /* The first element of the buffer is the path of the node
+		 * being updated in the form of an 8-byte string length
+ * followed by the string. Skip past this to get to the
+ * properties being updated.
+ */
+ vd = *prop_data++;
+ prop_data += vd;
+
+ /* The path we skipped over is counted as one of the elements
+		 * returned, so start counting at one.
+ */
+ for (i = 1; i < upwa->nprops; i++) {
char *prop_name;
- u32 vd;
- prop_name = prop_data + 1;
+ prop_name = prop_data;
prop_data += strlen(prop_name) + 1;
- vd = *prop_data++;
+ vd = *(u32 *)prop_data;
+ prop_data += sizeof(vd);
switch (vd) {
case 0x00000000:
@@ -219,7 +234,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
return rc;
}
-static int pseries_devicetree_update(void)
+int pseries_devicetree_update(s32 scope)
{
char *rtas_buf;
u32 *data;
@@ -235,7 +250,7 @@ static int pseries_devicetree_update(void)
return -ENOMEM;
do {
- rc = mobility_rtas_call(update_nodes_token, rtas_buf);
+ rc = mobility_rtas_call(update_nodes_token, rtas_buf, scope);
if (rc && rc != 1)
break;
@@ -256,7 +271,7 @@ static int pseries_devicetree_update(void)
delete_dt_node(phandle);
break;
case UPDATE_DT_NODE:
- update_dt_node(phandle);
+ update_dt_node(phandle, scope);
break;
case ADD_DT_NODE:
drc_index = *data++;
@@ -276,7 +291,7 @@ void post_mobility_fixup(void)
int rc;
int activate_fw_token;
- rc = pseries_devicetree_update();
+ rc = pseries_devicetree_update(MIGRATION_SCOPE);
if (rc) {
printk(KERN_ERR "Initial post-mobility device tree update "
"failed: %d\n", rc);
@@ -292,7 +307,7 @@ void post_mobility_fixup(void)
rc = rtas_call(activate_fw_token, 0, 1, NULL);
if (!rc) {
- rc = pseries_devicetree_update();
+ rc = pseries_devicetree_update(MIGRATION_SCOPE);
if (rc)
printk(KERN_ERR "Secondary post-mobility device tree "
"update failed: %d\n", rc);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index e5b0847..6d2f0ab 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -24,26 +24,7 @@ static int query_token, change_token;
#define RTAS_RESET_FN 2
#define RTAS_CHANGE_MSI_FN 3
#define RTAS_CHANGE_MSIX_FN 4
-
-static struct pci_dn *get_pdn(struct pci_dev *pdev)
-{
- struct device_node *dn;
- struct pci_dn *pdn;
-
- dn = pci_device_to_OF_node(pdev);
- if (!dn) {
- dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n");
- return NULL;
- }
-
- pdn = PCI_DN(dn);
- if (!pdn) {
- dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n");
- return NULL;
- }
-
- return pdn;
-}
+#define RTAS_CHANGE_32MSI_FN 5
/* RTAS Helpers */
@@ -58,7 +39,8 @@ static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
seq_num = 1;
do {
- if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN)
+ if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN ||
+ func == RTAS_CHANGE_32MSI_FN)
rc = rtas_call(change_token, 6, 4, rtas_ret, addr,
BUID_HI(buid), BUID_LO(buid),
func, num_irqs, seq_num);
@@ -89,7 +71,7 @@ static void rtas_disable_msi(struct pci_dev *pdev)
{
struct pci_dn *pdn;
- pdn = get_pdn(pdev);
+ pdn = pci_get_pdn(pdev);
if (!pdn)
return;
@@ -150,7 +132,7 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
struct pci_dn *pdn;
const u32 *req_msi;
- pdn = get_pdn(pdev);
+ pdn = pci_get_pdn(pdev);
if (!pdn)
return -ENODEV;
@@ -392,6 +374,23 @@ static int check_msix_entries(struct pci_dev *pdev)
return 0;
}
+static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
+{
+ u32 addr_hi, addr_lo;
+
+ /*
+ * We should only get in here for IODA1 configs. This is based on the
+	 * fact that we're using RTAS for MSIs, we don't have the 32 bit MSI RTAS
+ * support, and we are in a PCIe Gen2 slot.
+ */
+ dev_info(&pdev->dev,
+ "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n");
+ pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi);
+ addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4);
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo);
+ pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0);
+}
+
static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
{
struct pci_dn *pdn;
@@ -399,8 +398,9 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
struct msi_desc *entry;
struct msi_msg msg;
int nvec = nvec_in;
+ int use_32bit_msi_hack = 0;
- pdn = get_pdn(pdev);
+ pdn = pci_get_pdn(pdev);
if (!pdn)
return -ENODEV;
@@ -426,12 +426,31 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
*/
again:
if (type == PCI_CAP_ID_MSI) {
- rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
+ if (pdn->force_32bit_msi) {
+ rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
+ if (rc < 0) {
+ /*
+ * We only want to run the 32 bit MSI hack below if
+ * the max bus speed is Gen2 speed
+ */
+ if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT)
+ return rc;
+
+ use_32bit_msi_hack = 1;
+ }
+ } else
+ rc = -1;
+
+ if (rc < 0)
+ rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
if (rc < 0) {
pr_debug("rtas_msi: trying the old firmware call.\n");
rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
}
+
+ if (use_32bit_msi_hack && rc > 0)
+ rtas_hack_32bit_msi_gen2(pdev);
} else
rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
@@ -512,3 +531,4 @@ static int rtas_msi_init(void)
return 0;
}
arch_initcall(rtas_msi_init);
+
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 56b864d..5f93856 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -40,7 +40,8 @@ void pcibios_name_device(struct pci_dev *dev)
*/
dn = pci_device_to_OF_node(dev);
if (dn) {
- const char *loc_code = of_get_property(dn, "ibm,loc-code", 0);
+ const char *loc_code = of_get_property(dn, "ibm,loc-code",
+ NULL);
if (loc_code) {
int loc_len = strlen(loc_code);
if (loc_len < sizeof(dev->dev.name)) {
@@ -107,3 +108,56 @@ static void fixup_winbond_82c105(struct pci_dev* dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
fixup_winbond_82c105);
+
+int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct device_node *dn, *pdn;
+ struct pci_bus *bus;
+ const uint32_t *pcie_link_speed_stats;
+
+ bus = bridge->bus;
+
+ dn = pcibios_get_phb_of_node(bus);
+ if (!dn)
+ return 0;
+
+ for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
+ pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
+ "ibm,pcie-link-speed-stats", NULL);
+ if (pcie_link_speed_stats)
+ break;
+ }
+
+ of_node_put(pdn);
+
+ if (!pcie_link_speed_stats) {
+ pr_err("no ibm,pcie-link-speed-stats property\n");
+ return 0;
+ }
+
+ switch (pcie_link_speed_stats[0]) {
+ case 0x01:
+ bus->max_bus_speed = PCIE_SPEED_2_5GT;
+ break;
+ case 0x02:
+ bus->max_bus_speed = PCIE_SPEED_5_0GT;
+ break;
+ default:
+ bus->max_bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+
+ switch (pcie_link_speed_stats[1]) {
+ case 0x01:
+ bus->cur_bus_speed = PCIE_SPEED_2_5GT;
+ break;
+ case 0x02:
+ bus->cur_bus_speed = PCIE_SPEED_5_0GT;
+ break;
+ default:
+ bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
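pseries_root_bridge_prepare() exists so other code (such as the 32-bit MSI fallback in msi.c above) can consult bus->max_bus_speed, derived here from the ibm,pcie-link-speed-stats property (0x01 = 2.5GT/s, 0x02 = 5.0GT/s). The hunks shown do not include the hook-up itself; assuming the usual machdep callback, the wiring would look like:

	/* assumed wiring, not part of the hunks above */
	ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;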
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index e6cc34a..f35787b 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -2,6 +2,7 @@
#define _PSERIES_PLPAR_WRAPPERS_H
#include <linux/string.h>
+#include <linux/irqflags.h>
#include <asm/hvcall.h>
#include <asm/paca.h>
@@ -41,7 +42,14 @@ static inline long extended_cede_processor(unsigned long latency_hint)
u8 old_latency_hint = get_cede_latency_hint();
set_cede_latency_hint(latency_hint);
+
rc = cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Ensure that H_CEDE returns with IRQs on */
+ if (WARN_ON(!(mfmsr() & MSR_EE)))
+ __hard_irq_enable();
+#endif
+
set_cede_latency_hint(old_latency_hint);
return rc;
@@ -50,40 +58,39 @@ static inline long extended_cede_processor(unsigned long latency_hint)
static inline long vpa_call(unsigned long flags, unsigned long cpu,
unsigned long vpa)
{
- /* flags are in bits 16-18 (counting from most significant bit) */
- flags = flags << (63 - 18);
+ flags = flags << H_VPA_FUNC_SHIFT;
return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}
static inline long unregister_vpa(unsigned long cpu)
{
- return vpa_call(0x5, cpu, 0);
+ return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
}
static inline long register_vpa(unsigned long cpu, unsigned long vpa)
{
- return vpa_call(0x1, cpu, vpa);
+ return vpa_call(H_VPA_REG_VPA, cpu, vpa);
}
static inline long unregister_slb_shadow(unsigned long cpu)
{
- return vpa_call(0x7, cpu, 0);
+ return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
}
static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
{
- return vpa_call(0x3, cpu, vpa);
+ return vpa_call(H_VPA_REG_SLB, cpu, vpa);
}
static inline long unregister_dtl(unsigned long cpu)
{
- return vpa_call(0x6, cpu, 0);
+ return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
}
static inline long register_dtl(unsigned long cpu, unsigned long vpa)
{
- return vpa_call(0x2, cpu, vpa);
+ return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}
static inline long plpar_page_set_loaned(unsigned long vpa)
@@ -304,4 +311,14 @@ static inline long disable_reloc_on_exceptions(void) {
return plpar_set_mode(0, 3, 0, 0);
}
+static inline long plapr_set_ciabr(unsigned long ciabr)
+{
+ return plpar_set_mode(0, 1, ciabr, 0);
+}
+
+static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
+{
+ return plpar_set_mode(0, 2, dawr0, dawrx0);
+}
+
#endif /* _PSERIES_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 4d806b4..4644efa0 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -23,8 +23,8 @@
#include "pseries.h"
struct cpuidle_driver pseries_idle_driver = {
- .name = "pseries_idle",
- .owner = THIS_MODULE,
+ .name = "pseries_idle",
+ .owner = THIS_MODULE,
};
#define MAX_IDLE_STATE_COUNT 2
@@ -33,10 +33,8 @@ static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_device __percpu *pseries_cpuidle_devices;
static struct cpuidle_state *cpuidle_state_table;
-static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
+static inline void idle_loop_prolog(unsigned long *in_purr)
{
-
- *kt_before = ktime_get();
*in_purr = mfspr(SPRN_PURR);
/*
* Indicate to the HV that we are idle. Now would be
@@ -45,12 +43,10 @@ static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
get_lppaca()->idle = 1;
}
-static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
+static inline void idle_loop_epilog(unsigned long in_purr)
{
get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
get_lppaca()->idle = 0;
-
- return ktime_to_us(ktime_sub(ktime_get(), kt_before));
}
static int snooze_loop(struct cpuidle_device *dev,
@@ -58,10 +54,9 @@ static int snooze_loop(struct cpuidle_device *dev,
int index)
{
unsigned long in_purr;
- ktime_t kt_before;
int cpu = dev->cpu;
- idle_loop_prolog(&in_purr, &kt_before);
+ idle_loop_prolog(&in_purr);
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
@@ -75,8 +70,8 @@ static int snooze_loop(struct cpuidle_device *dev,
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb();
- dev->last_residency =
- (int)idle_loop_epilog(in_purr, kt_before);
+ idle_loop_epilog(in_purr);
+
return index;
}
@@ -102,9 +97,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
int index)
{
unsigned long in_purr;
- ktime_t kt_before;
- idle_loop_prolog(&in_purr, &kt_before);
+ idle_loop_prolog(&in_purr);
get_lppaca()->donate_dedicated_cpu = 1;
ppc64_runlatch_off();
@@ -112,8 +106,9 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
check_and_cede_processor();
get_lppaca()->donate_dedicated_cpu = 0;
- dev->last_residency =
- (int)idle_loop_epilog(in_purr, kt_before);
+
+ idle_loop_epilog(in_purr);
+
return index;
}
@@ -122,9 +117,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
int index)
{
unsigned long in_purr;
- ktime_t kt_before;
- idle_loop_prolog(&in_purr, &kt_before);
+ idle_loop_prolog(&in_purr);
/*
* Yield the processor to the hypervisor. We return if
@@ -135,8 +129,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
*/
check_and_cede_processor();
- dev->last_residency =
- (int)idle_loop_epilog(in_purr, kt_before);
+ idle_loop_epilog(in_purr);
+
return index;
}
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 9a3dda0..c2a3a25 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -19,7 +19,10 @@ extern void request_event_sources_irqs(struct device_node *np,
#include <linux/of.h>
-extern void __init fw_feature_init(const char *hypertas, unsigned long len);
+extern void __init fw_hypertas_feature_init(const char *hypertas,
+ unsigned long len);
+extern void __init fw_vec5_feature_init(const char *hypertas,
+ unsigned long len);
struct pt_regs;
@@ -60,4 +63,8 @@ extern int dlpar_detach_node(struct device_node *);
/* Snooze Delay, pseries_idle */
DECLARE_PER_CPU(long, smt_snooze_delay);
+/* PCI root bridge prepare function override for pseries */
+struct pci_host_bridge;
+int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
+
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index af281dc..a91e6da 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -21,6 +21,7 @@
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
+#include <asm/firmware.h>
#define MODULE_VERS "1.0"
@@ -32,40 +33,6 @@ static int sysfs_entries;
/* Helper routines */
-/*
- * Routine to detect firmware support for hcall
- * return 1 if H_BEST_ENERGY is supported
- * else return 0
- */
-
-static int check_for_h_best_energy(void)
-{
- struct device_node *rtas = NULL;
- const char *hypertas, *s;
- int length;
- int rc = 0;
-
- rtas = of_find_node_by_path("/rtas");
- if (!rtas)
- return 0;
-
- hypertas = of_get_property(rtas, "ibm,hypertas-functions", &length);
- if (!hypertas) {
- of_node_put(rtas);
- return 0;
- }
-
- /* hypertas will have list of strings with hcall names */
- for (s = hypertas; s < hypertas + length; s += strlen(s) + 1) {
- if (!strncmp("hcall-best-energy-1", s, 19)) {
- rc = 1; /* Found the string */
- break;
- }
- }
- of_node_put(rtas);
- return rc;
-}
-
/* Helper Routines to convert between drc_index to cpu numbers */
static u32 cpu_to_drc_index(int cpu)
@@ -262,7 +229,7 @@ static int __init pseries_energy_init(void)
int cpu, err;
struct device *cpu_dev;
- if (!check_for_h_best_energy()) {
+ if (!firmware_has_feature(FW_FEATURE_BEST_ENERGY)) {
printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n");
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index d6491bd..f93cdf5 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -452,7 +452,7 @@ static int proc_ppc64_create_ofdt(void)
ent = proc_create("powerpc/ofdt", S_IWUSR, NULL, &ofdt_fops);
if (ent)
- ent->size = 0;
+ proc_set_size(ent, 0);
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index 5544572..b502ab6 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -41,21 +41,16 @@
static unsigned int ibm_scan_log_dump; /* RTAS token */
-static struct proc_dir_entry *proc_ppc64_scan_log_dump; /* The proc file */
+static unsigned int *scanlog_buffer; /* The data buffer */
static ssize_t scanlog_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct inode * inode = file->f_path.dentry->d_inode;
- struct proc_dir_entry *dp;
- unsigned int *data;
+ unsigned int *data = scanlog_buffer;
int status;
unsigned long len, off;
unsigned int wait_time;
- dp = PDE(inode);
- data = (unsigned int *)dp->data;
-
if (count > RTAS_DATA_BUF_SIZE)
count = RTAS_DATA_BUF_SIZE;
@@ -139,8 +134,7 @@ static ssize_t scanlog_write(struct file * file, const char __user * buf,
static int scanlog_open(struct inode * inode, struct file * file)
{
- struct proc_dir_entry *dp = PDE(inode);
- unsigned int *data = (unsigned int *)dp->data;
+ unsigned int *data = scanlog_buffer;
if (data[0] != 0) {
/* This imperfect test stops a second copy of the
@@ -156,11 +150,9 @@ static int scanlog_open(struct inode * inode, struct file * file)
static int scanlog_release(struct inode * inode, struct file * file)
{
- struct proc_dir_entry *dp = PDE(inode);
- unsigned int *data = (unsigned int *)dp->data;
+ unsigned int *data = scanlog_buffer;
data[0] = 0;
-
return 0;
}
@@ -176,7 +168,6 @@ const struct file_operations scanlog_fops = {
static int __init scanlog_init(void)
{
struct proc_dir_entry *ent;
- void *data;
int err = -ENOMEM;
ibm_scan_log_dump = rtas_token("ibm,scan-log-dump");
@@ -184,29 +175,24 @@ static int __init scanlog_init(void)
return -ENODEV;
/* Ideally we could allocate a buffer < 4G */
- data = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
- if (!data)
+ scanlog_buffer = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+ if (!scanlog_buffer)
goto err;
- ent = proc_create_data("powerpc/rtas/scan-log-dump", S_IRUSR, NULL,
- &scanlog_fops, data);
+ ent = proc_create("powerpc/rtas/scan-log-dump", S_IRUSR, NULL,
+ &scanlog_fops);
if (!ent)
goto err;
-
- proc_ppc64_scan_log_dump = ent;
-
return 0;
err:
- kfree(data);
+ kfree(scanlog_buffer);
return err;
}
static void __exit scanlog_cleanup(void)
{
- if (proc_ppc64_scan_log_dump) {
- kfree(proc_ppc64_scan_log_dump->data);
- remove_proc_entry("scan-log-dump", proc_ppc64_scan_log_dump->parent);
- }
+ remove_proc_entry("powerpc/rtas/scan-log-dump", NULL);
+ kfree(scanlog_buffer);
}
module_init(scanlog_init);
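
Note: this conversion drops the PDE(inode)->data dereference, which is being removed as part of the procfs cleanup in this release, and since scanlog only ever has one entry it can live with a single file-scope buffer. For drivers that genuinely need per-entry data, the 3.10-era idiom appears to be proc_create_data() plus the PDE_DATA() accessor; a hedged sketch (function name and buffer size are illustrative; assumes <linux/fs.h> and <linux/proc_fs.h>):

	/* Hedged alternative sketch, not what this patch does: fetch the
	 * private pointer registered with proc_create_data() through the
	 * PDE_DATA() accessor instead of touching struct proc_dir_entry.
	 */
	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t count, loff_t *ppos)
	{
		unsigned int *data = PDE_DATA(file_inode(file));

		return simple_read_from_buffer(buf, count, ppos, data,
					       RTAS_DATA_BUF_SIZE);
	}
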
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index ca55882..c11c823 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -65,6 +65,7 @@
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
+#include <asm/reg.h>
#include "plpar_wrappers.h"
#include "pseries.h"
@@ -281,7 +282,7 @@ static struct notifier_block pci_dn_reconfig_nb = {
struct kmem_cache *dtl_cache;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
* Allocate space for the dispatch trace log for all possible cpus
* and register the buffers with the hypervisor. This is used for
@@ -332,12 +333,12 @@ static int alloc_dispatch_logs(void)
return 0;
}
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline int alloc_dispatch_logs(void)
{
return 0;
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int alloc_dispatch_log_kmem_cache(void)
{
@@ -375,7 +376,7 @@ static void pSeries_idle(void)
* to ever be a problem in practice we can move this into a kernel thread to
* finish off the process later in boot.
*/
-static int __init pSeries_enable_reloc_on_exc(void)
+long pSeries_enable_reloc_on_exc(void)
{
long rc;
unsigned int delay, total_delay = 0;
@@ -397,9 +398,9 @@ static int __init pSeries_enable_reloc_on_exc(void)
mdelay(delay);
}
}
+EXPORT_SYMBOL(pSeries_enable_reloc_on_exc);
-#ifdef CONFIG_KEXEC
-static long pSeries_disable_reloc_on_exc(void)
+long pSeries_disable_reloc_on_exc(void)
{
long rc;
@@ -410,7 +411,9 @@ static long pSeries_disable_reloc_on_exc(void)
mdelay(get_longbusy_msecs(rc));
}
}
+EXPORT_SYMBOL(pSeries_disable_reloc_on_exc);
+#ifdef CONFIG_KEXEC
static void pSeries_machine_kexec(struct kimage *image)
{
long rc;
@@ -463,6 +466,8 @@ static void __init pSeries_setup_arch(void)
else
ppc_md.enable_pmcs = power4_enable_pmcs;
+ ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
+
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
long rc;
if ((rc = pSeries_enable_reloc_on_exc()) != H_SUCCESS) {
@@ -498,6 +503,14 @@ static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
}
+static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx)
+{
+ /* PAPR says we can't set HYP */
+ dawrx &= ~DAWRX_HYP;
+
+ return plapr_set_watchpoint0(dawr, dawrx);
+}
+
#define CMO_CHARACTERISTICS_TOKEN 44
#define CMO_MAXLENGTH 1026
@@ -604,6 +617,9 @@ static void __init pSeries_init_early(void)
else if (firmware_has_feature(FW_FEATURE_DABR))
ppc_md.set_dabr = pseries_set_dabr;
+ if (firmware_has_feature(FW_FEATURE_SET_MODE))
+ ppc_md.set_dawr = pseries_set_dawr;
+
pSeries_cmo_feature_init();
iommu_init_early_pSeries();
@@ -614,25 +630,39 @@ static void __init pSeries_init_early(void)
* Called very early, MMU is off, device-tree isn't unflattened
*/
-static int __init pSeries_probe_hypertas(unsigned long node,
- const char *uname, int depth,
- void *data)
+static int __init pseries_probe_fw_features(unsigned long node,
+ const char *uname, int depth,
+ void *data)
{
- const char *hypertas;
+ const char *prop;
unsigned long len;
+ static int hypertas_found;
+ static int vec5_found;
- if (depth != 1 ||
- (strcmp(uname, "rtas") != 0 && strcmp(uname, "rtas@0") != 0))
+ if (depth != 1)
return 0;
- hypertas = of_get_flat_dt_prop(node, "ibm,hypertas-functions", &len);
- if (!hypertas)
- return 1;
+ if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) {
+ prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions",
+ &len);
+ if (prop) {
+ powerpc_firmware_features |= FW_FEATURE_LPAR;
+ fw_hypertas_feature_init(prop, len);
+ }
- powerpc_firmware_features |= FW_FEATURE_LPAR;
- fw_feature_init(hypertas, len);
+ hypertas_found = 1;
+ }
- return 1;
+ if (!strcmp(uname, "chosen")) {
+ prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5",
+ &len);
+ if (prop)
+ fw_vec5_feature_init(prop, len);
+
+ vec5_found = 1;
+ }
+
+ return hypertas_found && vec5_found;
}
static int __init pSeries_probe(void)
@@ -655,7 +685,7 @@ static int __init pSeries_probe(void)
pr_debug("pSeries detected, looking for LPAR capability...\n");
/* Now try to figure out if we are running on LPAR */
- of_scan_flat_dt(pSeries_probe_hypertas, NULL);
+ of_scan_flat_dt(pseries_probe_fw_features, NULL);
if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_lpar();
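
Note: of_scan_flat_dt() keeps invoking its callback for every node until the callback returns nonzero, which is why the rewritten probe uses static hypertas_found/vec5_found flags and only returns their conjunction: it must visit both /rtas (for "ibm,hypertas-functions") and /chosen (for "ibm,architecture-vec-5") before the scan may stop. A stripped-down illustration of that stop-after-two contract (names invented; assumes <linux/string.h> and <linux/of_fdt.h>):

	/* Illustrative only: returning 0 keeps of_scan_flat_dt() walking,
	 * any nonzero value ends the scan, so we stay at 0 until both
	 * interesting top-level nodes have been seen.
	 */
	static int __init example_scan_two_nodes(unsigned long node,
						 const char *uname, int depth,
						 void *data)
	{
		static int seen;

		if (depth != 1)
			return 0;
		if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0") ||
		    !strcmp(uname, "chosen"))
			seen++;
		return seen == 2;	/* nonzero stops the scan */
	}
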
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index ffd15e8..ca2d1f6 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -42,6 +42,7 @@
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
+#include <asm/dbell.h>
#include "plpar_wrappers.h"
#include "pseries.h"
@@ -54,6 +55,11 @@
*/
static cpumask_var_t of_spin_mask;
+/*
+ * If we multiplex IPI mechanisms, store the appropriate XICS IPI mechanism here
+ */
+static void (*xics_cause_ipi)(int cpu, unsigned long data);
+
/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
int smp_query_cpu_stopped(unsigned int pcpu)
{
@@ -137,6 +143,8 @@ static void smp_xics_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
xics_setup_cpu();
+ if (cpu_has_feature(CPU_FTR_DBELL))
+ doorbell_setup_this_cpu();
if (firmware_has_feature(FW_FEATURE_SPLPAR))
vpa_init(cpu);
@@ -179,6 +187,27 @@ static int smp_pSeries_kick_cpu(int nr)
return 0;
}
+/* Only used on systems that support multiple IPI mechanisms */
+static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
+{
+ if (cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id())))
+ doorbell_cause_ipi(cpu, data);
+ else
+ xics_cause_ipi(cpu, data);
+}
+
+static __init int pSeries_smp_probe(void)
+{
+ int ret = xics_smp_probe();
+
+ if (cpu_has_feature(CPU_FTR_DBELL)) {
+ xics_cause_ipi = smp_ops->cause_ipi;
+ smp_ops->cause_ipi = pSeries_cause_ipi_mux;
+ }
+
+ return ret;
+}
+
static struct smp_ops_t pSeries_mpic_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
@@ -188,8 +217,8 @@ static struct smp_ops_t pSeries_mpic_smp_ops = {
static struct smp_ops_t pSeries_xics_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
- .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
- .probe = xics_smp_probe,
+ .cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */
+ .probe = pSeries_smp_probe,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_xics_setup_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
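
Note: pSeries_smp_probe() saves whatever cause_ipi handler xics_smp_probe() installed and layers the mux on top of it, so doorbells are used only where the cpu_sibling_mask() test holds, i.e. when the target shares a core with the sending CPU; every other target still goes through the saved XICS path. A tiny illustrative helper restating just that test (name is made up; assumes <linux/cpumask.h> and the powerpc <asm/smp.h>):

	/* Illustrative only: the condition under which the mux above picks
	 * the doorbell IPI rather than the saved XICS mechanism.
	 */
	static bool ipi_target_is_core_sibling(int cpu)
	{
		return cpumask_test_cpu(cpu,
					cpu_sibling_mask(smp_processor_id()));
	}
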
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 47226e0..5f997e7 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -16,6 +16,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/stat.h>
@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ cpumask_var_t offline_mask;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+ return -ENOMEM;
+
stream_id = simple_strtoul(buf, NULL, 16);
do {
@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev,
} while (rc == -EAGAIN);
if (!rc) {
+ /* All present CPUs must be online */
+ cpumask_andnot(offline_mask, cpu_present_mask,
+ cpu_online_mask);
+ rc = rtas_online_cpus_mask(offline_mask);
+ if (rc) {
+ pr_err("%s: Could not bring present CPUs online.\n",
+ __func__);
+ goto out;
+ }
+
stop_topology_update();
rc = pm_suspend(PM_SUSPEND_MEM);
start_topology_update();
+
+ /* Take down CPUs not online prior to suspend */
+ if (!rtas_offline_cpus_mask(offline_mask))
+ pr_warn("%s: Could not restore CPUs to offline "
+ "state.\n", __func__);
}
stream_id = 0;
if (!rc)
rc = count;
+out:
+ free_cpumask_var(offline_mask);
return rc;
}
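
Note: the hibernation join requires every present CPU to be online, so the handler records which present CPUs are currently offline, onlines them with rtas_online_cpus_mask(), performs the suspend, and then uses the same mask to put them back offline. A minimal sketch of the mask computation only (name invented; assumes <linux/cpumask.h>):

	/* Illustrative only: compute the set of present-but-offline CPUs so
	 * the same set can be restored to the offline state afterwards.
	 */
	static void example_collect_offline(struct cpumask *offline)
	{
		cpumask_andnot(offline, cpu_present_mask, cpu_online_mask);
	}
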
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
index a07da1e..8066ddf 100644
--- a/arch/powerpc/platforms/wsp/Kconfig
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -9,7 +9,6 @@ config PPC_WSP
select PCI
select PPC_IO_WORKAROUNDS if PCI
select PPC_INDIRECT_PIO if PCI
- select PPC_WSP_COPRO
default n
menu "WSP platform selection"
@@ -27,7 +26,3 @@ config PPC_CHROMA
select OF_DYNAMIC
endmenu
-
-config PPC_A2_DD2
- bool "Support for DD2 based A2/WSP systems"
- depends on PPC_A2
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index 97fe82e..2d3b1dd 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -361,7 +361,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
wsp_ics_set_xive(ics, hw_irq, xive);
- return 0;
+ return IRQ_SET_MASK_OK;
}
static struct irq_chip wsp_irq_chip = {
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index 8e22f56..62cb527 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -502,7 +502,7 @@ static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
(~(hose->mem_resources[0].end -
hose->mem_resources[0].start)) & 0x3ffffff0000ul);
out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
- (hose->mem_resources[0].start - hose->pci_mem_offset) | 1);
+ (hose->mem_resources[0].start - hose->mem_offset[0]) | 1);
/* Clear all TVT entries
*
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 6c22d91..acdbf95 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -19,6 +19,7 @@ config PPC_MSI_BITMAP
default y if MPIC
default y if FSL_PCI
default y if PPC4xx_MSI
+ default y if POWERNV_MSI
source "arch/powerpc/sysdev/xics/Kconfig"
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index fa63186..4a66c65 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o
obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
-obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
mv64x60-$(CONFIG_PCI) += mv64x60_pci.o
obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \
mv64x60_udbg.o
@@ -40,6 +39,7 @@ obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
obj-$(CONFIG_PPC_I8259) += i8259.o
obj-$(CONFIG_IPIC) += ipic.o
obj-$(CONFIG_4xx) += uic.o
+obj-$(CONFIG_PPC4xx_OCM) += ppc4xx_ocm.o
obj-$(CONFIG_4xx_SOC) += ppc4xx_soc.o
obj-$(CONFIG_XILINX_VIRTEX) += xilinx_intc.o
obj-$(CONFIG_XILINX_PCI) += xilinx_pci.o
@@ -67,6 +67,8 @@ endif
obj-$(CONFIG_PPC_SCOM) += scom.o
+obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o
+
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
obj-$(CONFIG_PPC_XICS) += xics/
diff --git a/arch/powerpc/sysdev/bestcomm/Kconfig b/arch/powerpc/sysdev/bestcomm/Kconfig
deleted file mode 100644
index 29e4270..0000000
--- a/arch/powerpc/sysdev/bestcomm/Kconfig
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Kconfig options for Bestcomm
-#
-
-config PPC_BESTCOMM
- tristate "Bestcomm DMA engine support"
- depends on PPC_MPC52xx
- default n
- select PPC_LIB_RHEAP
- help
- BestComm is the name of the communication coprocessor found
- on the Freescale MPC5200 family of processor. Its usage is
- optional for some drivers (like ATA), but required for
- others (like FEC).
-
- If you want to use drivers that require DMA operations,
- answer Y or M. Otherwise say N.
-
-config PPC_BESTCOMM_ATA
- tristate
- depends on PPC_BESTCOMM
- help
- This option enables the support for the ATA task.
-
-config PPC_BESTCOMM_FEC
- tristate
- depends on PPC_BESTCOMM
- help
- This option enables the support for the FEC tasks.
-
-config PPC_BESTCOMM_GEN_BD
- tristate
- depends on PPC_BESTCOMM
- help
- This option enables the support for the GenBD tasks.
-
diff --git a/arch/powerpc/sysdev/bestcomm/Makefile b/arch/powerpc/sysdev/bestcomm/Makefile
deleted file mode 100644
index aed2df2..0000000
--- a/arch/powerpc/sysdev/bestcomm/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Makefile for BestComm & co
-#
-
-bestcomm-core-objs := bestcomm.o sram.o
-bestcomm-ata-objs := ata.o bcom_ata_task.o
-bestcomm-fec-objs := fec.o bcom_fec_rx_task.o bcom_fec_tx_task.o
-bestcomm-gen-bd-objs := gen_bd.o bcom_gen_bd_rx_task.o bcom_gen_bd_tx_task.o
-
-obj-$(CONFIG_PPC_BESTCOMM) += bestcomm-core.o
-obj-$(CONFIG_PPC_BESTCOMM_ATA) += bestcomm-ata.o
-obj-$(CONFIG_PPC_BESTCOMM_FEC) += bestcomm-fec.o
-obj-$(CONFIG_PPC_BESTCOMM_GEN_BD) += bestcomm-gen-bd.o
-
diff --git a/arch/powerpc/sysdev/bestcomm/ata.c b/arch/powerpc/sysdev/bestcomm/ata.c
deleted file mode 100644
index 901c9f9..0000000
--- a/arch/powerpc/sysdev/bestcomm/ata.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Bestcomm ATA task driver
- *
- *
- * Patterned after bestcomm/fec.c by Dale Farnsworth <dfarnsworth@mvista.com>
- * 2003-2004 (c) MontaVista, Software, Inc.
- *
- * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2006 Freescale - John Rigby
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <asm/io.h>
-
-#include "bestcomm.h"
-#include "bestcomm_priv.h"
-#include "ata.h"
-
-
-/* ======================================================================== */
-/* Task image/var/inc */
-/* ======================================================================== */
-
-/* ata task image */
-extern u32 bcom_ata_task[];
-
-/* ata task vars that need to be set before enabling the task */
-struct bcom_ata_var {
- u32 enable; /* (u16*) address of task's control register */
- u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
- u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
- u32 bd_start; /* (struct bcom_bd*) current bd */
- u32 buffer_size; /* size of receive buffer */
-};
-
-/* ata task incs that need to be set before enabling the task */
-struct bcom_ata_inc {
- u16 pad0;
- s16 incr_bytes;
- u16 pad1;
- s16 incr_dst;
- u16 pad2;
- s16 incr_src;
-};
-
-
-/* ======================================================================== */
-/* Task support code */
-/* ======================================================================== */
-
-struct bcom_task *
-bcom_ata_init(int queue_len, int maxbufsize)
-{
- struct bcom_task *tsk;
- struct bcom_ata_var *var;
- struct bcom_ata_inc *inc;
-
- /* Prefetch breaks ATA DMA. Turn it off for ATA DMA */
- bcom_disable_prefetch();
-
- tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0);
- if (!tsk)
- return NULL;
-
- tsk->flags = BCOM_FLAGS_NONE;
-
- bcom_ata_reset_bd(tsk);
-
- var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
- inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
-
- if (bcom_load_image(tsk->tasknum, bcom_ata_task)) {
- bcom_task_free(tsk);
- return NULL;
- }
-
- var->enable = bcom_eng->regs_base +
- offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
- var->bd_base = tsk->bd_pa;
- var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
- var->bd_start = tsk->bd_pa;
- var->buffer_size = maxbufsize;
-
- /* Configure some stuff */
- bcom_set_task_pragma(tsk->tasknum, BCOM_ATA_PRAGMA);
- bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
-
- out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_RX], BCOM_IPR_ATA_RX);
- out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_TX], BCOM_IPR_ATA_TX);
-
- out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
-
- return tsk;
-}
-EXPORT_SYMBOL_GPL(bcom_ata_init);
-
-void bcom_ata_rx_prepare(struct bcom_task *tsk)
-{
- struct bcom_ata_inc *inc;
-
- inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
-
- inc->incr_bytes = -(s16)sizeof(u32);
- inc->incr_src = 0;
- inc->incr_dst = sizeof(u32);
-
- bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_RX);
-}
-EXPORT_SYMBOL_GPL(bcom_ata_rx_prepare);
-
-void bcom_ata_tx_prepare(struct bcom_task *tsk)
-{
- struct bcom_ata_inc *inc;
-
- inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
-
- inc->incr_bytes = -(s16)sizeof(u32);
- inc->incr_src = sizeof(u32);
- inc->incr_dst = 0;
-
- bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_TX);
-}
-EXPORT_SYMBOL_GPL(bcom_ata_tx_prepare);
-
-void bcom_ata_reset_bd(struct bcom_task *tsk)
-{
- struct bcom_ata_var *var;
-
- /* Reset all BD */
- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
-
- tsk->index = 0;
- tsk->outdex = 0;
-
- var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
- var->bd_start = var->bd_base;
-}
-EXPORT_SYMBOL_GPL(bcom_ata_reset_bd);
-
-void bcom_ata_release(struct bcom_task *tsk)
-{
- /* Nothing special for the ATA tasks */
- bcom_task_free(tsk);
-}
-EXPORT_SYMBOL_GPL(bcom_ata_release);
-
-
-MODULE_DESCRIPTION("BestComm ATA task driver");
-MODULE_AUTHOR("John Rigby");
-MODULE_LICENSE("GPL v2");
-
diff --git a/arch/powerpc/sysdev/bestcomm/ata.h b/arch/powerpc/sysdev/bestcomm/ata.h
deleted file mode 100644
index 0b23718..0000000
--- a/arch/powerpc/sysdev/bestcomm/ata.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Header for Bestcomm ATA task driver
- *
- *
- * Copyright (C) 2006 Freescale - John Rigby
- * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __BESTCOMM_ATA_H__
-#define __BESTCOMM_ATA_H__
-
-
-struct bcom_ata_bd {
- u32 status;
- u32 src_pa;
- u32 dst_pa;
-};
-
-extern struct bcom_task * bcom_ata_init(int queue_len, int maxbufsize);
-extern void bcom_ata_rx_prepare(struct bcom_task *tsk);
-extern void bcom_ata_tx_prepare(struct bcom_task *tsk);
-extern void bcom_ata_reset_bd(struct bcom_task *tsk);
-extern void bcom_ata_release(struct bcom_task *tsk);
-
-#endif /* __BESTCOMM_ATA_H__ */
-
diff --git a/arch/powerpc/sysdev/bestcomm/bcom_ata_task.c b/arch/powerpc/sysdev/bestcomm/bcom_ata_task.c
deleted file mode 100644
index cc6049a..0000000
--- a/arch/powerpc/sysdev/bestcomm/bcom_ata_task.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Bestcomm ATA task microcode
- *
- * Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Created based on bestcom/code_dma/image_rtos1/dma_image.hex
- */
-
-#include <asm/types.h>
-
-/*
- * The header consists of the following fields:
- * u32 magic;
- * u8 desc_size;
- * u8 var_size;
- * u8 inc_size;
- * u8 first_var;
- * u8 reserved[8];
- *
- * The size fields contain the number of 32-bit words.
- */
-
-u32 bcom_ata_task[] = {
- /* header */
- 0x4243544b,
- 0x0e060709,
- 0x00000000,
- 0x00000000,
-
- /* Task descriptors */
- 0x8198009b, /* LCD: idx0 = var3; idx0 <= var2; idx0 += inc3 */
- 0x13e00c08, /* DRD1A: var3 = var1; FN=0 MORE init=31 WS=0 RS=0 */
- 0xb8000264, /* LCD: idx1 = *idx0, idx2 = var0; idx1 < var9; idx1 += inc4, idx2 += inc4 */
- 0x10000f00, /* DRD1A: var3 = idx0; FN=0 MORE init=0 WS=0 RS=0 */
- 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
- 0x0c8cfc8a, /* DRD2B1: *idx2 = EU3(); EU3(*idx2,var10) */
- 0xd8988240, /* LCDEXT: idx1 = idx1; idx1 > var9; idx1 += inc0 */
- 0xf845e011, /* LCDEXT: idx2 = *(idx0 + var00000015); ; idx2 += inc2 */
- 0xb845e00a, /* LCD: idx3 = *(idx0 + var00000019); ; idx3 += inc1 */
- 0x0bfecf90, /* DRD1A: *idx3 = *idx2; FN=0 TFD init=31 WS=3 RS=3 */
- 0x9898802d, /* LCD: idx1 = idx1; idx1 once var0; idx1 += inc5 */
- 0x64000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 INT EXT init=0 WS=0 RS=0 */
- 0x0c0cf849, /* DRD2B1: *idx0 = EU3(); EU3(idx1,var9) */
- 0x000001f8, /* NOP */
-
- /* VAR[9]-VAR[14] */
- 0x40000000,
- 0x7fff7fff,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- /* INC[0]-INC[6] */
- 0x40000000,
- 0xe0000000,
- 0xe0000000,
- 0xa000000c,
- 0x20000000,
- 0x00000000,
- 0x00000000,
-};
-
diff --git a/arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c
deleted file mode 100644
index a1ad6a0..0000000
--- a/arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Bestcomm FEC RX task microcode
- *
- * Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
- * on Tue Mar 22 11:19:38 2005 GMT
- */
-
-#include <asm/types.h>
-
-/*
- * The header consists of the following fields:
- * u32 magic;
- * u8 desc_size;
- * u8 var_size;
- * u8 inc_size;
- * u8 first_var;
- * u8 reserved[8];
- *
- * The size fields contain the number of 32-bit words.
- */
-
-u32 bcom_fec_rx_task[] = {
- /* header */
- 0x4243544b,
- 0x18060709,
- 0x00000000,
- 0x00000000,
-
- /* Task descriptors */
- 0x808220e3, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
- 0x10601010, /* DRD1A: var4 = var2; FN=0 MORE init=3 WS=0 RS=0 */
- 0xb8800264, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc4, idx3 += inc4 */
- 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
- 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
- 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
- 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
- 0xb8c58029, /* LCD: idx3 = *(idx1 + var00000015); idx3 once var0; idx3 += inc5 */
- 0x60000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=0 RS=0 */
- 0x088cf8cc, /* DRD2B1: idx2 = EU3(); EU3(idx3,var12) */
- 0x991982f2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var11; idx2 += inc6, idx3 += inc2 */
- 0x006acf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=1 RS=1 */
- 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
- 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
- 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
- 0x034cfc4e, /* DRD2B1: var13 = EU3(); EU3(*idx1,var14) */
- 0x00008868, /* DRD1A: idx2 = var13; FN=0 init=0 WS=0 RS=0 */
- 0x99198341, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var13; idx2 += inc0, idx3 += inc1 */
- 0x007ecf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=3 RS=3 */
- 0x99198272, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc6, idx3 += inc2 */
- 0x046acf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=3 WS=1 RS=1 */
- 0x9819002d, /* LCD: idx2 = idx0; idx2 once var0; idx2 += inc5 */
- 0x0060c790, /* DRD1A: *idx1 = *idx2; FN=0 init=3 WS=0 RS=0 */
- 0x000001f8, /* NOP */
-
- /* VAR[9]-VAR[14] */
- 0x40000000,
- 0x7fff7fff,
- 0x00000000,
- 0x00000003,
- 0x40000008,
- 0x43ffffff,
-
- /* INC[0]-INC[6] */
- 0x40000000,
- 0xe0000000,
- 0xe0000000,
- 0xa0000008,
- 0x20000000,
- 0x00000000,
- 0x4000ffff,
-};
-
diff --git a/arch/powerpc/sysdev/bestcomm/bcom_fec_tx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_fec_tx_task.c
deleted file mode 100644
index b1c495c..0000000
--- a/arch/powerpc/sysdev/bestcomm/bcom_fec_tx_task.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Bestcomm FEC TX task microcode
- *
- * Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
- * on Tue Mar 22 11:19:29 2005 GMT
- */
-
-#include <asm/types.h>
-
-/*
- * The header consists of the following fields:
- * u32 magic;
- * u8 desc_size;
- * u8 var_size;
- * u8 inc_size;
- * u8 first_var;
- * u8 reserved[8];
- *
- * The size fields contain the number of 32-bit words.
- */
-
-u32 bcom_fec_tx_task[] = {
- /* header */
- 0x4243544b,
- 0x2407070d,
- 0x00000000,
- 0x00000000,
-
- /* Task descriptors */
- 0x8018001b, /* LCD: idx0 = var0; idx0 <= var0; idx0 += inc3 */
- 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
- 0x01ccfc0d, /* DRD2B1: var7 = EU3(); EU3(*idx0,var13) */
- 0x8082a123, /* LCD: idx0 = var1, idx1 = var5; idx1 <= var4; idx0 += inc4, idx1 += inc3 */
- 0x10801418, /* DRD1A: var5 = var3; FN=0 MORE init=4 WS=0 RS=0 */
- 0xf88103a4, /* LCDEXT: idx2 = *idx1, idx3 = var2; idx2 < var14; idx2 += inc4, idx3 += inc4 */
- 0x801a6024, /* LCD: idx4 = var0; ; idx4 += inc4 */
- 0x10001708, /* DRD1A: var5 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
- 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
- 0x0cccfccf, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var15) */
- 0x991a002c, /* LCD: idx2 = idx2, idx3 = idx4; idx2 once var0; idx2 += inc5, idx3 += inc4 */
- 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
- 0x024cfc4d, /* DRD2B1: var9 = EU3(); EU3(*idx1,var13) */
- 0x60000003, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=3 EXT init=0 WS=0 RS=0 */
- 0x0cccf247, /* DRD2B1: *idx3 = EU3(); EU3(var9,var7) */
- 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
- 0xb8c80029, /* LCD: idx3 = *(idx1 + var0000001a); idx3 once var0; idx3 += inc5 */
- 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
- 0x088cf8d1, /* DRD2B1: idx2 = EU3(); EU3(idx3,var17) */
- 0x00002f10, /* DRD1A: var11 = idx2; FN=0 init=0 WS=0 RS=0 */
- 0x99198432, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var16; idx2 += inc6, idx3 += inc2 */
- 0x008ac398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=1 RS=1 */
- 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
- 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
- 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
- 0x048cfc53, /* DRD2B1: var18 = EU3(); EU3(*idx1,var19) */
- 0x60000008, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=8 EXT init=0 WS=0 RS=0 */
- 0x088cf48b, /* DRD2B1: idx2 = EU3(); EU3(var18,var11) */
- 0x99198481, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var18; idx2 += inc0, idx3 += inc1 */
- 0x009ec398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=3 RS=3 */
- 0x991983b2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var14; idx2 += inc6, idx3 += inc2 */
- 0x088ac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD init=4 WS=1 RS=1 */
- 0x9919002d, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc5 */
- 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
- 0x0c4cf88e, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var14) */
- 0x000001f8, /* NOP */
-
- /* VAR[13]-VAR[19] */
- 0x0c000000,
- 0x40000000,
- 0x7fff7fff,
- 0x00000000,
- 0x00000003,
- 0x40000004,
- 0x43ffffff,
-
- /* INC[0]-INC[6] */
- 0x40000000,
- 0xe0000000,
- 0xe0000000,
- 0xa0000008,
- 0x20000000,
- 0x00000000,
- 0x4000ffff,
-};
-
diff --git a/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_rx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_rx_task.c
deleted file mode 100644
index efee022..0000000
--- a/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_rx_task.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Bestcomm GenBD RX task microcode
- *
- * Copyright (C) 2006 AppSpec Computer Technologies Corp.
- * Jeff Gibbons <jeff.gibbons@appspec.com>
- * Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
- * on Tue Mar 4 10:14:12 2006 GMT
- *
- */
-
-#include <asm/types.h>
-
-/*
- * The header consists of the following fields:
- * u32 magic;
- * u8 desc_size;
- * u8 var_size;
- * u8 inc_size;
- * u8 first_var;
- * u8 reserved[8];
- *
- * The size fields contain the number of 32-bit words.
- */
-
-u32 bcom_gen_bd_rx_task[] = {
- /* header */
- 0x4243544b,
- 0x0d020409,
- 0x00000000,
- 0x00000000,
-
- /* Task descriptors */
- 0x808220da, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc3, idx1 += inc2 */
- 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
- 0xb880025b, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc3, idx3 += inc3 */
- 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
- 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
- 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
- 0xd9190240, /* LCDEXT: idx2 = idx2; idx2 > var9; idx2 += inc0 */
- 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
- 0x07fecf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=31 WS=3 RS=3 */
- 0x99190024, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc4 */
- 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
- 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
- 0x000001f8, /* NOP */
-
- /* VAR[9]-VAR[10] */
- 0x40000000,
- 0x7fff7fff,
-
- /* INC[0]-INC[3] */
- 0x40000000,
- 0xe0000000,
- 0xa0000008,
- 0x20000000,
-};
-
diff --git a/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_tx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_tx_task.c
deleted file mode 100644
index c605aa4..0000000
--- a/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_tx_task.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Bestcomm GenBD TX task microcode
- *
- * Copyright (C) 2006 AppSpec Computer Technologies Corp.
- * Jeff Gibbons <jeff.gibbons@appspec.com>
- * Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
- * on Tue Mar 4 10:14:12 2006 GMT
- *
- */
-
-#include <asm/types.h>
-
-/*
- * The header consists of the following fields:
- * u32 magic;
- * u8 desc_size;
- * u8 var_size;
- * u8 inc_size;
- * u8 first_var;
- * u8 reserved[8];
- *
- * The size fields contain the number of 32-bit words.
- */
-
-u32 bcom_gen_bd_tx_task[] = {
- /* header */
- 0x4243544b,
- 0x0f040609,
- 0x00000000,
- 0x00000000,
-
- /* Task descriptors */
- 0x800220e3, /* LCD: idx0 = var0, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
- 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
- 0xb8808264, /* LCD: idx2 = *idx1, idx3 = var1; idx2 < var9; idx2 += inc4, idx3 += inc4 */
- 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
- 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
- 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
- 0xd9190300, /* LCDEXT: idx2 = idx2; idx2 > var12; idx2 += inc0 */
- 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
- 0x03fec398, /* DRD1A: *idx0 = *idx3; FN=0 init=31 WS=3 RS=3 */
- 0x9919826a, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc5, idx3 += inc2 */
- 0x0feac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD INT init=31 WS=1 RS=1 */
- 0x99190036, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc6 */
- 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
- 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
- 0x000001f8, /* NOP */
-
- /* VAR[9]-VAR[12] */
- 0x40000000,
- 0x7fff7fff,
- 0x00000000,
- 0x40000004,
-
- /* INC[0]-INC[5] */
- 0x40000000,
- 0xe0000000,
- 0xe0000000,
- 0xa0000008,
- 0x20000000,
- 0x4000ffff,
-};
-
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
deleted file mode 100644
index d913063..0000000
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Driver for MPC52xx processor BestComm peripheral controller
- *
- *
- * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2005 Varma Electronics Oy,
- * ( by Andrey Volkov <avolkov@varma-el.com> )
- * Copyright (C) 2003-2004 MontaVista, Software, Inc.
- * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/mpc52xx.h>
-
-#include "sram.h"
-#include "bestcomm_priv.h"
-#include "bestcomm.h"
-
-#define DRIVER_NAME "bestcomm-core"
-
-/* MPC5200 device tree match tables */
-static struct of_device_id mpc52xx_sram_ids[] = {
- { .compatible = "fsl,mpc5200-sram", },
- { .compatible = "mpc5200-sram", },
- {}
-};
-
-
-struct bcom_engine *bcom_eng = NULL;
-EXPORT_SYMBOL_GPL(bcom_eng); /* needed for inline functions */
-
-/* ======================================================================== */
-/* Public and private API */
-/* ======================================================================== */
-
-/* Private API */
-
-struct bcom_task *
-bcom_task_alloc(int bd_count, int bd_size, int priv_size)
-{
- int i, tasknum = -1;
- struct bcom_task *tsk;
-
- /* Don't try to do anything if bestcomm init failed */
- if (!bcom_eng)
- return NULL;
-
- /* Get and reserve a task num */
- spin_lock(&bcom_eng->lock);
-
- for (i=0; i<BCOM_MAX_TASKS; i++)
- if (!bcom_eng->tdt[i].stop) { /* we use stop as a marker */
- bcom_eng->tdt[i].stop = 0xfffffffful; /* dummy addr */
- tasknum = i;
- break;
- }
-
- spin_unlock(&bcom_eng->lock);
-
- if (tasknum < 0)
- return NULL;
-
- /* Allocate our structure */
- tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL);
- if (!tsk)
- goto error;
-
- tsk->tasknum = tasknum;
- if (priv_size)
- tsk->priv = (void*)tsk + sizeof(struct bcom_task);
-
- /* Get IRQ of that task */
- tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
- if (tsk->irq == NO_IRQ)
- goto error;
-
- /* Init the BDs, if needed */
- if (bd_count) {
- tsk->cookie = kmalloc(sizeof(void*) * bd_count, GFP_KERNEL);
- if (!tsk->cookie)
- goto error;
-
- tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa);
- if (!tsk->bd)
- goto error;
- memset(tsk->bd, 0x00, bd_count * bd_size);
-
- tsk->num_bd = bd_count;
- tsk->bd_size = bd_size;
- }
-
- return tsk;
-
-error:
- if (tsk) {
- if (tsk->irq != NO_IRQ)
- irq_dispose_mapping(tsk->irq);
- bcom_sram_free(tsk->bd);
- kfree(tsk->cookie);
- kfree(tsk);
- }
-
- bcom_eng->tdt[tasknum].stop = 0;
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(bcom_task_alloc);
-
-void
-bcom_task_free(struct bcom_task *tsk)
-{
- /* Stop the task */
- bcom_disable_task(tsk->tasknum);
-
- /* Clear TDT */
- bcom_eng->tdt[tsk->tasknum].start = 0;
- bcom_eng->tdt[tsk->tasknum].stop = 0;
-
- /* Free everything */
- irq_dispose_mapping(tsk->irq);
- bcom_sram_free(tsk->bd);
- kfree(tsk->cookie);
- kfree(tsk);
-}
-EXPORT_SYMBOL_GPL(bcom_task_free);
-
-int
-bcom_load_image(int task, u32 *task_image)
-{
- struct bcom_task_header *hdr = (struct bcom_task_header *)task_image;
- struct bcom_tdt *tdt;
- u32 *desc, *var, *inc;
- u32 *desc_src, *var_src, *inc_src;
-
- /* Safety checks */
- if (hdr->magic != BCOM_TASK_MAGIC) {
- printk(KERN_ERR DRIVER_NAME
- ": Trying to load invalid microcode\n");
- return -EINVAL;
- }
-
- if ((task < 0) || (task >= BCOM_MAX_TASKS)) {
- printk(KERN_ERR DRIVER_NAME
- ": Trying to load invalid task %d\n", task);
- return -EINVAL;
- }
-
- /* Initial load or reload */
- tdt = &bcom_eng->tdt[task];
-
- if (tdt->start) {
- desc = bcom_task_desc(task);
- if (hdr->desc_size != bcom_task_num_descs(task)) {
- printk(KERN_ERR DRIVER_NAME
- ": Trying to reload wrong task image "
- "(%d size %d/%d)!\n",
- task,
- hdr->desc_size,
- bcom_task_num_descs(task));
- return -EINVAL;
- }
- } else {
- phys_addr_t start_pa;
-
- desc = bcom_sram_alloc(hdr->desc_size * sizeof(u32), 4, &start_pa);
- if (!desc)
- return -ENOMEM;
-
- tdt->start = start_pa;
- tdt->stop = start_pa + ((hdr->desc_size-1) * sizeof(u32));
- }
-
- var = bcom_task_var(task);
- inc = bcom_task_inc(task);
-
- /* Clear & copy */
- memset(var, 0x00, BCOM_VAR_SIZE);
- memset(inc, 0x00, BCOM_INC_SIZE);
-
- desc_src = (u32 *)(hdr + 1);
- var_src = desc_src + hdr->desc_size;
- inc_src = var_src + hdr->var_size;
-
- memcpy(desc, desc_src, hdr->desc_size * sizeof(u32));
- memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
- memcpy(inc, inc_src, hdr->inc_size * sizeof(u32));
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(bcom_load_image);
-
-void
-bcom_set_initiator(int task, int initiator)
-{
- int i;
- int num_descs;
- u32 *desc;
- int next_drd_has_initiator;
-
- bcom_set_tcr_initiator(task, initiator);
-
- /* Just setting tcr is apparently not enough due to some problem */
- /* with it. So we just go thru all the microcode and replace in */
- /* the DRD directly */
-
- desc = bcom_task_desc(task);
- next_drd_has_initiator = 1;
- num_descs = bcom_task_num_descs(task);
-
- for (i=0; i<num_descs; i++, desc++) {
- if (!bcom_desc_is_drd(*desc))
- continue;
- if (next_drd_has_initiator)
- if (bcom_desc_initiator(*desc) != BCOM_INITIATOR_ALWAYS)
- bcom_set_desc_initiator(desc, initiator);
- next_drd_has_initiator = !bcom_drd_is_extended(*desc);
- }
-}
-EXPORT_SYMBOL_GPL(bcom_set_initiator);
-
-
-/* Public API */
-
-void
-bcom_enable(struct bcom_task *tsk)
-{
- bcom_enable_task(tsk->tasknum);
-}
-EXPORT_SYMBOL_GPL(bcom_enable);
-
-void
-bcom_disable(struct bcom_task *tsk)
-{
- bcom_disable_task(tsk->tasknum);
-}
-EXPORT_SYMBOL_GPL(bcom_disable);
-
-
-/* ======================================================================== */
-/* Engine init/cleanup */
-/* ======================================================================== */
-
-/* Function Descriptor table */
-/* this will need to be updated if Freescale changes their task code FDT */
-static u32 fdt_ops[] = {
- 0xa0045670, /* FDT[48] - load_acc() */
- 0x80045670, /* FDT[49] - unload_acc() */
- 0x21800000, /* FDT[50] - and() */
- 0x21e00000, /* FDT[51] - or() */
- 0x21500000, /* FDT[52] - xor() */
- 0x21400000, /* FDT[53] - andn() */
- 0x21500000, /* FDT[54] - not() */
- 0x20400000, /* FDT[55] - add() */
- 0x20500000, /* FDT[56] - sub() */
- 0x20800000, /* FDT[57] - lsh() */
- 0x20a00000, /* FDT[58] - rsh() */
- 0xc0170000, /* FDT[59] - crc8() */
- 0xc0145670, /* FDT[60] - crc16() */
- 0xc0345670, /* FDT[61] - crc32() */
- 0xa0076540, /* FDT[62] - endian32() */
- 0xa0000760, /* FDT[63] - endian16() */
-};
-
-
-static int bcom_engine_init(void)
-{
- int task;
- phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
- unsigned int tdt_size, ctx_size, var_size, fdt_size;
-
- /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
- tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
- ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE;
- var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE);
- fdt_size = BCOM_FDT_SIZE;
-
- bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa);
- bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa);
- bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa);
- bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa);
-
- if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) {
- printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n");
-
- bcom_sram_free(bcom_eng->tdt);
- bcom_sram_free(bcom_eng->ctx);
- bcom_sram_free(bcom_eng->var);
- bcom_sram_free(bcom_eng->fdt);
-
- return -ENOMEM;
- }
-
- memset(bcom_eng->tdt, 0x00, tdt_size);
- memset(bcom_eng->ctx, 0x00, ctx_size);
- memset(bcom_eng->var, 0x00, var_size);
- memset(bcom_eng->fdt, 0x00, fdt_size);
-
- /* Copy the FDT for the EU#3 */
- memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
-
- /* Initialize Task base structure */
- for (task=0; task<BCOM_MAX_TASKS; task++)
- {
- out_be16(&bcom_eng->regs->tcr[task], 0);
- out_8(&bcom_eng->regs->ipr[task], 0);
-
- bcom_eng->tdt[task].context = ctx_pa;
- bcom_eng->tdt[task].var = var_pa;
- bcom_eng->tdt[task].fdt = fdt_pa;
-
- var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE;
- ctx_pa += BCOM_CTX_SIZE;
- }
-
- out_be32(&bcom_eng->regs->taskBar, tdt_pa);
-
- /* Init 'always' initiator */
- out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);
-
- /* Disable COMM Bus Prefetch on the original 5200; it's broken */
- if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
- bcom_disable_prefetch();
-
- /* Init lock */
- spin_lock_init(&bcom_eng->lock);
-
- return 0;
-}
-
-static void
-bcom_engine_cleanup(void)
-{
- int task;
-
- /* Stop all tasks */
- for (task=0; task<BCOM_MAX_TASKS; task++)
- {
- out_be16(&bcom_eng->regs->tcr[task], 0);
- out_8(&bcom_eng->regs->ipr[task], 0);
- }
-
- out_be32(&bcom_eng->regs->taskBar, 0ul);
-
- /* Release the SRAM zones */
- bcom_sram_free(bcom_eng->tdt);
- bcom_sram_free(bcom_eng->ctx);
- bcom_sram_free(bcom_eng->var);
- bcom_sram_free(bcom_eng->fdt);
-}
-
-
-/* ======================================================================== */
-/* OF platform driver */
-/* ======================================================================== */
-
-static int mpc52xx_bcom_probe(struct platform_device *op)
-{
- struct device_node *ofn_sram;
- struct resource res_bcom;
-
- int rv;
-
- /* Inform user we're ok so far */
- printk(KERN_INFO "DMA: MPC52xx BestComm driver\n");
-
- /* Get the bestcomm node */
- of_node_get(op->dev.of_node);
-
- /* Prepare SRAM */
- ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids);
- if (!ofn_sram) {
- printk(KERN_ERR DRIVER_NAME ": "
- "No SRAM found in device tree\n");
- rv = -ENODEV;
- goto error_ofput;
- }
- rv = bcom_sram_init(ofn_sram, DRIVER_NAME);
- of_node_put(ofn_sram);
-
- if (rv) {
- printk(KERN_ERR DRIVER_NAME ": "
- "Error in SRAM init\n");
- goto error_ofput;
- }
-
- /* Get a clean struct */
- bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
- if (!bcom_eng) {
- printk(KERN_ERR DRIVER_NAME ": "
- "Can't allocate state structure\n");
- rv = -ENOMEM;
- goto error_sramclean;
- }
-
- /* Save the node */
- bcom_eng->ofnode = op->dev.of_node;
-
- /* Get, reserve & map io */
- if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) {
- printk(KERN_ERR DRIVER_NAME ": "
- "Can't get resource\n");
- rv = -EINVAL;
- goto error_sramclean;
- }
-
- if (!request_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma),
- DRIVER_NAME)) {
- printk(KERN_ERR DRIVER_NAME ": "
- "Can't request registers region\n");
- rv = -EBUSY;
- goto error_sramclean;
- }
-
- bcom_eng->regs_base = res_bcom.start;
- bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma));
- if (!bcom_eng->regs) {
- printk(KERN_ERR DRIVER_NAME ": "
- "Can't map registers\n");
- rv = -ENOMEM;
- goto error_release;
- }
-
- /* Now, do the real init */
- rv = bcom_engine_init();
- if (rv)
- goto error_unmap;
-
- /* Done ! */
- printk(KERN_INFO "DMA: MPC52xx BestComm engine @%08lx ok !\n",
- (long)bcom_eng->regs_base);
-
- return 0;
-
- /* Error path */
-error_unmap:
- iounmap(bcom_eng->regs);
-error_release:
- release_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma));
-error_sramclean:
- kfree(bcom_eng);
- bcom_sram_cleanup();
-error_ofput:
- of_node_put(op->dev.of_node);
-
- printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n");
-
- return rv;
-}
-
-
-static int mpc52xx_bcom_remove(struct platform_device *op)
-{
- /* Clean up the engine */
- bcom_engine_cleanup();
-
- /* Cleanup SRAM */
- bcom_sram_cleanup();
-
- /* Release regs */
- iounmap(bcom_eng->regs);
- release_mem_region(bcom_eng->regs_base, sizeof(struct mpc52xx_sdma));
-
- /* Release the node */
- of_node_put(bcom_eng->ofnode);
-
- /* Release memory */
- kfree(bcom_eng);
- bcom_eng = NULL;
-
- return 0;
-}
-
-static struct of_device_id mpc52xx_bcom_of_match[] = {
- { .compatible = "fsl,mpc5200-bestcomm", },
- { .compatible = "mpc5200-bestcomm", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
-
-
-static struct platform_driver mpc52xx_bcom_of_platform_driver = {
- .probe = mpc52xx_bcom_probe,
- .remove = mpc52xx_bcom_remove,
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- .of_match_table = mpc52xx_bcom_of_match,
- },
-};
-
-
-/* ======================================================================== */
-/* Module */
-/* ======================================================================== */
-
-static int __init
-mpc52xx_bcom_init(void)
-{
- return platform_driver_register(&mpc52xx_bcom_of_platform_driver);
-}
-
-static void __exit
-mpc52xx_bcom_exit(void)
-{
- platform_driver_unregister(&mpc52xx_bcom_of_platform_driver);
-}
-
-/* If we're not a module, we must make sure everything is setup before */
-/* anyone tries to use us ... that's why we use subsys_initcall instead */
-/* of module_init. */
-subsys_initcall(mpc52xx_bcom_init);
-module_exit(mpc52xx_bcom_exit);
-
-MODULE_DESCRIPTION("Freescale MPC52xx BestComm DMA");
-MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
-MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
-MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
-MODULE_LICENSE("GPL v2");
-
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h
deleted file mode 100644
index a0e2e6b..0000000
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Public header for the MPC52xx processor BestComm driver
- *
- *
- * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2005 Varma Electronics Oy,
- * ( by Andrey Volkov <avolkov@varma-el.com> )
- * Copyright (C) 2003-2004 MontaVista, Software, Inc.
- * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __BESTCOMM_H__
-#define __BESTCOMM_H__
-
-/**
- * struct bcom_bd - Structure describing a generic BestComm buffer descriptor
- * @status: The current status of this buffer. Exact meaning depends on the
- * task type
- * @data: An array of u32 extra data. Size of array is task dependent.
- *
- * Note: Don't dereference a bcom_bd pointer as an array. The size of the
- * bcom_bd is variable. Use bcom_get_bd() instead.
- */
-struct bcom_bd {
- u32 status;
- u32 data[0]; /* variable payload size */
-};
-
-/* ======================================================================== */
-/* Generic task management */
-/* ======================================================================== */
-
-/**
- * struct bcom_task - Structure describing a loaded BestComm task
- *
- * This structure is never built by the driver it self. It's built and
- * filled the intermediate layer of the BestComm API, the task dependent
- * support code.
- *
- * Most likely you don't need to poke around inside this structure. The
- * fields are exposed in the header just for the sake of inline functions
- */
-struct bcom_task {
- unsigned int tasknum;
- unsigned int flags;
- int irq;
-
- struct bcom_bd *bd;
- phys_addr_t bd_pa;
- void **cookie;
- unsigned short index;
- unsigned short outdex;
- unsigned int num_bd;
- unsigned int bd_size;
-
- void* priv;
-};
-
-#define BCOM_FLAGS_NONE 0x00000000ul
-#define BCOM_FLAGS_ENABLE_TASK (1ul << 0)
-
-/**
- * bcom_enable - Enable a BestComm task
- * @tsk: The BestComm task structure
- *
- * This function makes sure the given task is enabled and can be run
- * by the BestComm engine as needed
- */
-extern void bcom_enable(struct bcom_task *tsk);
-
-/**
- * bcom_disable - Disable a BestComm task
- * @tsk: The BestComm task structure
- *
- * This function disable a given task, making sure it's not executed
- * by the BestComm engine.
- */
-extern void bcom_disable(struct bcom_task *tsk);
-
-
-/**
- * bcom_get_task_irq - Returns the irq number of a BestComm task
- * @tsk: The BestComm task structure
- */
-static inline int
-bcom_get_task_irq(struct bcom_task *tsk) {
- return tsk->irq;
-}
-
-/* ======================================================================== */
-/* BD based tasks helpers */
-/* ======================================================================== */
-
-#define BCOM_BD_READY 0x40000000ul
-
-/** _bcom_next_index - Get next input index.
- * @tsk: pointer to task structure
- *
- * Support function; Device drivers should not call this
- */
-static inline int
-_bcom_next_index(struct bcom_task *tsk)
-{
- return ((tsk->index + 1) == tsk->num_bd) ? 0 : tsk->index + 1;
-}
-
-/** _bcom_next_outdex - Get next output index.
- * @tsk: pointer to task structure
- *
- * Support function; Device drivers should not call this
- */
-static inline int
-_bcom_next_outdex(struct bcom_task *tsk)
-{
- return ((tsk->outdex + 1) == tsk->num_bd) ? 0 : tsk->outdex + 1;
-}
-
-/**
- * bcom_queue_empty - Checks if a BestComm task BD queue is empty
- * @tsk: The BestComm task structure
- */
-static inline int
-bcom_queue_empty(struct bcom_task *tsk)
-{
- return tsk->index == tsk->outdex;
-}
-
-/**
- * bcom_queue_full - Checks if a BestComm task BD queue is full
- * @tsk: The BestComm task structure
- */
-static inline int
-bcom_queue_full(struct bcom_task *tsk)
-{
- return tsk->outdex == _bcom_next_index(tsk);
-}
-
-/**
- * bcom_get_bd - Get a BD from the queue
- * @tsk: The BestComm task structure
- * index: Index of the BD to fetch
- */
-static inline struct bcom_bd
-*bcom_get_bd(struct bcom_task *tsk, unsigned int index)
-{
- /* A cast to (void*) so the address can be incremented by the
- * real size instead of by sizeof(struct bcom_bd) */
- return ((void *)tsk->bd) + (index * tsk->bd_size);
-}
-
-/**
- * bcom_buffer_done - Checks if a BestComm
- * @tsk: The BestComm task structure
- */
-static inline int
-bcom_buffer_done(struct bcom_task *tsk)
-{
- struct bcom_bd *bd;
- if (bcom_queue_empty(tsk))
- return 0;
-
- bd = bcom_get_bd(tsk, tsk->outdex);
- return !(bd->status & BCOM_BD_READY);
-}
-
-/**
- * bcom_prepare_next_buffer - clear status of next available buffer.
- * @tsk: The BestComm task structure
- *
- * Returns pointer to next buffer descriptor
- */
-static inline struct bcom_bd *
-bcom_prepare_next_buffer(struct bcom_task *tsk)
-{
- struct bcom_bd *bd;
-
- bd = bcom_get_bd(tsk, tsk->index);
- bd->status = 0; /* cleanup last status */
- return bd;
-}
-
-static inline void
-bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie)
-{
- struct bcom_bd *bd = bcom_get_bd(tsk, tsk->index);
-
- tsk->cookie[tsk->index] = cookie;
- mb(); /* ensure the bd is really up-to-date */
- bd->status |= BCOM_BD_READY;
- tsk->index = _bcom_next_index(tsk);
- if (tsk->flags & BCOM_FLAGS_ENABLE_TASK)
- bcom_enable(tsk);
-}
-
-static inline void *
-bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd)
-{
- void *cookie = tsk->cookie[tsk->outdex];
- struct bcom_bd *bd = bcom_get_bd(tsk, tsk->outdex);
-
- if (p_status)
- *p_status = bd->status;
- if (p_bd)
- *p_bd = bd;
- tsk->outdex = _bcom_next_outdex(tsk);
- return cookie;
-}
-
-#endif /* __BESTCOMM_H__ */
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
deleted file mode 100644
index 3b52f3f..0000000
--- a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Private header for the MPC52xx processor BestComm driver
- *
- * By private, we mean that driver should not use it directly. It's meant
- * to be used by the BestComm engine driver itself and by the intermediate
- * layer between the core and the drivers.
- *
- * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2005 Varma Electronics Oy,
- * ( by Andrey Volkov <avolkov@varma-el.com> )
- * Copyright (C) 2003-2004 MontaVista, Software, Inc.
- * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __BESTCOMM_PRIV_H__
-#define __BESTCOMM_PRIV_H__
-
-#include <linux/spinlock.h>
-#include <linux/of.h>
-#include <asm/io.h>
-#include <asm/mpc52xx.h>
-
-#include "sram.h"
-
-
-/* ======================================================================== */
-/* Engine related stuff */
-/* ======================================================================== */
-
-/* Zones sizes and needed alignments */
-#define BCOM_MAX_TASKS 16
-#define BCOM_MAX_VAR 24
-#define BCOM_MAX_INC 8
-#define BCOM_MAX_FDT 64
-#define BCOM_MAX_CTX 20
-#define BCOM_CTX_SIZE (BCOM_MAX_CTX * sizeof(u32))
-#define BCOM_CTX_ALIGN 0x100
-#define BCOM_VAR_SIZE (BCOM_MAX_VAR * sizeof(u32))
-#define BCOM_INC_SIZE (BCOM_MAX_INC * sizeof(u32))
-#define BCOM_VAR_ALIGN 0x80
-#define BCOM_FDT_SIZE (BCOM_MAX_FDT * sizeof(u32))
-#define BCOM_FDT_ALIGN 0x100
-
-/**
- * struct bcom_tdt - Task Descriptor Table Entry
- *
- */
-struct bcom_tdt {
- u32 start;
- u32 stop;
- u32 var;
- u32 fdt;
- u32 exec_status; /* used internally by BestComm engine */
- u32 mvtp; /* used internally by BestComm engine */
- u32 context;
- u32 litbase;
-};
-
-/**
- * struct bcom_engine
- *
- * This holds all info needed globaly to handle the engine
- */
-struct bcom_engine {
- struct device_node *ofnode;
- struct mpc52xx_sdma __iomem *regs;
- phys_addr_t regs_base;
-
- struct bcom_tdt *tdt;
- u32 *ctx;
- u32 *var;
- u32 *fdt;
-
- spinlock_t lock;
-};
-
-extern struct bcom_engine *bcom_eng;
-
-
-/* ======================================================================== */
-/* Tasks related stuff */
-/* ======================================================================== */
-
-/* Tasks image header */
-#define BCOM_TASK_MAGIC 0x4243544B /* 'BCTK' */
-
-struct bcom_task_header {
- u32 magic;
- u8 desc_size; /* the size fields */
- u8 var_size; /* are given in number */
- u8 inc_size; /* of 32-bits words */
- u8 first_var;
- u8 reserved[8];
-};
-
-/* Descriptors structure & co */
-#define BCOM_DESC_NOP 0x000001f8
-#define BCOM_LCD_MASK 0x80000000
-#define BCOM_DRD_EXTENDED 0x40000000
-#define BCOM_DRD_INITIATOR_SHIFT 21
-
-/* Tasks pragma */
-#define BCOM_PRAGMA_BIT_RSV 7 /* reserved pragma bit */
-#define BCOM_PRAGMA_BIT_PRECISE_INC 6 /* increment 0=when possible, */
- /* 1=iter end */
-#define BCOM_PRAGMA_BIT_RST_ERROR_NO 5 /* don't reset errors on */
- /* task enable */
-#define BCOM_PRAGMA_BIT_PACK 4 /* pack data enable */
-#define BCOM_PRAGMA_BIT_INTEGER 3 /* data alignment */
- /* 0=frac(msb), 1=int(lsb) */
-#define BCOM_PRAGMA_BIT_SPECREAD 2 /* XLB speculative read */
-#define BCOM_PRAGMA_BIT_CW 1 /* write line buffer enable */
-#define BCOM_PRAGMA_BIT_RL 0 /* read line buffer enable */
-
- /* Looks like XLB speculative read generates XLB errors when a buffer
- * is at the end of the physical memory. i.e. when accessing the
- * lasts words, the engine tries to prefetch the next but there is no
- * next ...
- */
-#define BCOM_STD_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \
- (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \
- (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \
- (0 << BCOM_PRAGMA_BIT_PACK) | \
- (0 << BCOM_PRAGMA_BIT_INTEGER) | \
- (0 << BCOM_PRAGMA_BIT_SPECREAD) | \
- (1 << BCOM_PRAGMA_BIT_CW) | \
- (1 << BCOM_PRAGMA_BIT_RL))
-
-#define BCOM_PCI_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \
- (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \
- (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \
- (0 << BCOM_PRAGMA_BIT_PACK) | \
- (1 << BCOM_PRAGMA_BIT_INTEGER) | \
- (0 << BCOM_PRAGMA_BIT_SPECREAD) | \
- (1 << BCOM_PRAGMA_BIT_CW) | \
- (1 << BCOM_PRAGMA_BIT_RL))
-
-#define BCOM_ATA_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_CRC16_DP_0_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_CRC16_DP_1_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_FEC_RX_BD_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_FEC_TX_BD_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_GEN_DP_0_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_GEN_DP_1_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_GEN_DP_2_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_GEN_DP_3_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_GEN_DP_BD_0_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_GEN_DP_BD_1_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_GEN_RX_BD_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_GEN_TX_BD_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_GEN_LPC_PRAGMA BCOM_STD_PRAGMA
-#define BCOM_PCI_RX_PRAGMA BCOM_PCI_PRAGMA
-#define BCOM_PCI_TX_PRAGMA BCOM_PCI_PRAGMA
-
-/* Initiators number */
-#define BCOM_INITIATOR_ALWAYS 0
-#define BCOM_INITIATOR_SCTMR_0 1
-#define BCOM_INITIATOR_SCTMR_1 2
-#define BCOM_INITIATOR_FEC_RX 3
-#define BCOM_INITIATOR_FEC_TX 4
-#define BCOM_INITIATOR_ATA_RX 5
-#define BCOM_INITIATOR_ATA_TX 6
-#define BCOM_INITIATOR_SCPCI_RX 7
-#define BCOM_INITIATOR_SCPCI_TX 8
-#define BCOM_INITIATOR_PSC3_RX 9
-#define BCOM_INITIATOR_PSC3_TX 10
-#define BCOM_INITIATOR_PSC2_RX 11
-#define BCOM_INITIATOR_PSC2_TX 12
-#define BCOM_INITIATOR_PSC1_RX 13
-#define BCOM_INITIATOR_PSC1_TX 14
-#define BCOM_INITIATOR_SCTMR_2 15
-#define BCOM_INITIATOR_SCLPC 16
-#define BCOM_INITIATOR_PSC5_RX 17
-#define BCOM_INITIATOR_PSC5_TX 18
-#define BCOM_INITIATOR_PSC4_RX 19
-#define BCOM_INITIATOR_PSC4_TX 20
-#define BCOM_INITIATOR_I2C2_RX 21
-#define BCOM_INITIATOR_I2C2_TX 22
-#define BCOM_INITIATOR_I2C1_RX 23
-#define BCOM_INITIATOR_I2C1_TX 24
-#define BCOM_INITIATOR_PSC6_RX 25
-#define BCOM_INITIATOR_PSC6_TX 26
-#define BCOM_INITIATOR_IRDA_RX 25
-#define BCOM_INITIATOR_IRDA_TX 26
-#define BCOM_INITIATOR_SCTMR_3 27
-#define BCOM_INITIATOR_SCTMR_4 28
-#define BCOM_INITIATOR_SCTMR_5 29
-#define BCOM_INITIATOR_SCTMR_6 30
-#define BCOM_INITIATOR_SCTMR_7 31
-
-/* Initiators priorities */
-#define BCOM_IPR_ALWAYS 7
-#define BCOM_IPR_SCTMR_0 2
-#define BCOM_IPR_SCTMR_1 2
-#define BCOM_IPR_FEC_RX 6
-#define BCOM_IPR_FEC_TX 5
-#define BCOM_IPR_ATA_RX 7
-#define BCOM_IPR_ATA_TX 7
-#define BCOM_IPR_SCPCI_RX 2
-#define BCOM_IPR_SCPCI_TX 2
-#define BCOM_IPR_PSC3_RX 2
-#define BCOM_IPR_PSC3_TX 2
-#define BCOM_IPR_PSC2_RX 2
-#define BCOM_IPR_PSC2_TX 2
-#define BCOM_IPR_PSC1_RX 2
-#define BCOM_IPR_PSC1_TX 2
-#define BCOM_IPR_SCTMR_2 2
-#define BCOM_IPR_SCLPC 2
-#define BCOM_IPR_PSC5_RX 2
-#define BCOM_IPR_PSC5_TX 2
-#define BCOM_IPR_PSC4_RX 2
-#define BCOM_IPR_PSC4_TX 2
-#define BCOM_IPR_I2C2_RX 2
-#define BCOM_IPR_I2C2_TX 2
-#define BCOM_IPR_I2C1_RX 2
-#define BCOM_IPR_I2C1_TX 2
-#define BCOM_IPR_PSC6_RX 2
-#define BCOM_IPR_PSC6_TX 2
-#define BCOM_IPR_IRDA_RX 2
-#define BCOM_IPR_IRDA_TX 2
-#define BCOM_IPR_SCTMR_3 2
-#define BCOM_IPR_SCTMR_4 2
-#define BCOM_IPR_SCTMR_5 2
-#define BCOM_IPR_SCTMR_6 2
-#define BCOM_IPR_SCTMR_7 2
-
-
-/* ======================================================================== */
-/* API */
-/* ======================================================================== */
-
-extern struct bcom_task *bcom_task_alloc(int bd_count, int bd_size, int priv_size);
-extern void bcom_task_free(struct bcom_task *tsk);
-extern int bcom_load_image(int task, u32 *task_image);
-extern void bcom_set_initiator(int task, int initiator);
-
-
-#define TASK_ENABLE 0x8000
-
-/**
- * bcom_disable_prefetch - Hook to disable bus prefetching
- *
- * ATA DMA and the original MPC5200 need this due to silicon bugs. At the
- * moment disabling prefetch is a one-way street. There is no mechanism
- * in place to turn prefetch back on after it has been disabled. There is
- * no reason it couldn't be done, it would just be more complex to implement.
- */
-static inline void bcom_disable_prefetch(void)
-{
- u16 regval;
-
- regval = in_be16(&bcom_eng->regs->PtdCntrl);
- out_be16(&bcom_eng->regs->PtdCntrl, regval | 1);
-};
-
-static inline void
-bcom_enable_task(int task)
-{
- u16 reg;
- reg = in_be16(&bcom_eng->regs->tcr[task]);
- out_be16(&bcom_eng->regs->tcr[task], reg | TASK_ENABLE);
-}
-
-static inline void
-bcom_disable_task(int task)
-{
- u16 reg = in_be16(&bcom_eng->regs->tcr[task]);
- out_be16(&bcom_eng->regs->tcr[task], reg & ~TASK_ENABLE);
-}
-
-
-static inline u32 *
-bcom_task_desc(int task)
-{
- return bcom_sram_pa2va(bcom_eng->tdt[task].start);
-}
-
-static inline int
-bcom_task_num_descs(int task)
-{
- return (bcom_eng->tdt[task].stop - bcom_eng->tdt[task].start)/sizeof(u32) + 1;
-}
-
-static inline u32 *
-bcom_task_var(int task)
-{
- return bcom_sram_pa2va(bcom_eng->tdt[task].var);
-}
-
-static inline u32 *
-bcom_task_inc(int task)
-{
- return &bcom_task_var(task)[BCOM_MAX_VAR];
-}
-
-
-static inline int
-bcom_drd_is_extended(u32 desc)
-{
- return (desc) & BCOM_DRD_EXTENDED;
-}
-
-static inline int
-bcom_desc_is_drd(u32 desc)
-{
- return !(desc & BCOM_LCD_MASK) && desc != BCOM_DESC_NOP;
-}
-
-static inline int
-bcom_desc_initiator(u32 desc)
-{
- return (desc >> BCOM_DRD_INITIATOR_SHIFT) & 0x1f;
-}
-
-static inline void
-bcom_set_desc_initiator(u32 *desc, int initiator)
-{
- *desc = (*desc & ~(0x1f << BCOM_DRD_INITIATOR_SHIFT)) |
- ((initiator & 0x1f) << BCOM_DRD_INITIATOR_SHIFT);
-}
-
-
-static inline void
-bcom_set_task_pragma(int task, int pragma)
-{
- u32 *fdt = &bcom_eng->tdt[task].fdt;
- *fdt = (*fdt & ~0xff) | pragma;
-}
-
-static inline void
-bcom_set_task_auto_start(int task, int next_task)
-{
- u16 __iomem *tcr = &bcom_eng->regs->tcr[task];
- out_be16(tcr, (in_be16(tcr) & ~0xff) | 0x00c0 | next_task);
-}
-
-static inline void
-bcom_set_tcr_initiator(int task, int initiator)
-{
- u16 __iomem *tcr = &bcom_eng->regs->tcr[task];
- out_be16(tcr, (in_be16(tcr) & ~0x1f00) | ((initiator & 0x1f) << 8));
-}
-
-
-#endif /* __BESTCOMM_PRIV_H__ */
-
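For orientation, a minimal sketch of the call sequence this private header implies for the task drivers that follow; the microcode symbol, ring geometry and error codes are illustrative and not part of this tree:

/* Illustrative only */
extern u32 sample_task_image[];			/* hypothetical microcode image */

static int sample_task_setup(void)
{
	struct bcom_task *tsk;

	tsk = bcom_task_alloc(64, 2 * sizeof(u32), 0);	/* 64 two-word BDs */
	if (!tsk)
		return -ENOMEM;

	if (bcom_load_image(tsk->tasknum, sample_task_image)) {
		bcom_task_free(tsk);
		return -EIO;
	}

	bcom_set_task_pragma(tsk->tasknum, BCOM_STD_PRAGMA);
	bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ALWAYS);
	bcom_enable_task(tsk->tasknum);
	return 0;
}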
diff --git a/arch/powerpc/sysdev/bestcomm/fec.c b/arch/powerpc/sysdev/bestcomm/fec.c
deleted file mode 100644
index 957a988..0000000
--- a/arch/powerpc/sysdev/bestcomm/fec.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Bestcomm FEC tasks driver
- *
- *
- * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003-2004 MontaVista, Software, Inc.
- * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <asm/io.h>
-
-#include "bestcomm.h"
-#include "bestcomm_priv.h"
-#include "fec.h"
-
-
-/* ======================================================================== */
-/* Task image/var/inc */
-/* ======================================================================== */
-
-/* fec tasks images */
-extern u32 bcom_fec_rx_task[];
-extern u32 bcom_fec_tx_task[];
-
-/* rx task vars that need to be set before enabling the task */
-struct bcom_fec_rx_var {
- u32 enable; /* (u16*) address of task's control register */
- u32 fifo; /* (u32*) address of fec's fifo */
- u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
- u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
- u32 bd_start; /* (struct bcom_bd*) current bd */
- u32 buffer_size; /* size of receive buffer */
-};
-
-/* rx task incs that need to be set before enabling the task */
-struct bcom_fec_rx_inc {
- u16 pad0;
- s16 incr_bytes;
- u16 pad1;
- s16 incr_dst;
- u16 pad2;
- s16 incr_dst_ma;
-};
-
-/* tx task vars that need to be set before enabling the task */
-struct bcom_fec_tx_var {
- u32 DRD; /* (u32*) address of self-modified DRD */
- u32 fifo; /* (u32*) address of fec's fifo */
- u32 enable; /* (u16*) address of task's control register */
- u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
- u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
- u32 bd_start; /* (struct bcom_bd*) current bd */
- u32 buffer_size; /* set by uCode for each packet */
-};
-
-/* tx task incs that need to be set before enabling the task */
-struct bcom_fec_tx_inc {
- u16 pad0;
- s16 incr_bytes;
- u16 pad1;
- s16 incr_src;
- u16 pad2;
- s16 incr_src_ma;
-};
-
-/* private structure in the task */
-struct bcom_fec_priv {
- phys_addr_t fifo;
- int maxbufsize;
-};
-
-
-/* ======================================================================== */
-/* Task support code */
-/* ======================================================================== */
-
-struct bcom_task *
-bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize)
-{
- struct bcom_task *tsk;
- struct bcom_fec_priv *priv;
-
- tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
- sizeof(struct bcom_fec_priv));
- if (!tsk)
- return NULL;
-
- tsk->flags = BCOM_FLAGS_NONE;
-
- priv = tsk->priv;
- priv->fifo = fifo;
- priv->maxbufsize = maxbufsize;
-
- if (bcom_fec_rx_reset(tsk)) {
- bcom_task_free(tsk);
- return NULL;
- }
-
- return tsk;
-}
-EXPORT_SYMBOL_GPL(bcom_fec_rx_init);
-
-int
-bcom_fec_rx_reset(struct bcom_task *tsk)
-{
- struct bcom_fec_priv *priv = tsk->priv;
- struct bcom_fec_rx_var *var;
- struct bcom_fec_rx_inc *inc;
-
- /* Shutdown the task */
- bcom_disable_task(tsk->tasknum);
-
- /* Reset the microcode */
- var = (struct bcom_fec_rx_var *) bcom_task_var(tsk->tasknum);
- inc = (struct bcom_fec_rx_inc *) bcom_task_inc(tsk->tasknum);
-
- if (bcom_load_image(tsk->tasknum, bcom_fec_rx_task))
- return -1;
-
- var->enable = bcom_eng->regs_base +
- offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
- var->fifo = (u32) priv->fifo;
- var->bd_base = tsk->bd_pa;
- var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
- var->bd_start = tsk->bd_pa;
- var->buffer_size = priv->maxbufsize;
-
- inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */
- inc->incr_dst = sizeof(u32); /* task image, but we stick */
- inc->incr_dst_ma= sizeof(u8); /* to the official ones */
-
- /* Reset the BDs */
- tsk->index = 0;
- tsk->outdex = 0;
-
- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
-
- /* Configure some stuff */
- bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
- bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
-
- out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX);
-
- out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(bcom_fec_rx_reset);
-
-void
-bcom_fec_rx_release(struct bcom_task *tsk)
-{
- /* Nothing special for the FEC tasks */
- bcom_task_free(tsk);
-}
-EXPORT_SYMBOL_GPL(bcom_fec_rx_release);
-
-
-
- /* Return 2nd to last DRD */
- /* This is an ugly hack, but at least it's only done
- once at initialization */
-static u32 *self_modified_drd(int tasknum)
-{
- u32 *desc;
- int num_descs;
- int drd_count;
- int i;
-
- num_descs = bcom_task_num_descs(tasknum);
- desc = bcom_task_desc(tasknum) + num_descs - 1;
- drd_count = 0;
- for (i=0; i<num_descs; i++, desc--)
- if (bcom_desc_is_drd(*desc) && ++drd_count == 3)
- break;
- return desc;
-}
-
-struct bcom_task *
-bcom_fec_tx_init(int queue_len, phys_addr_t fifo)
-{
- struct bcom_task *tsk;
- struct bcom_fec_priv *priv;
-
- tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
- sizeof(struct bcom_fec_priv));
- if (!tsk)
- return NULL;
-
- tsk->flags = BCOM_FLAGS_ENABLE_TASK;
-
- priv = tsk->priv;
- priv->fifo = fifo;
-
- if (bcom_fec_tx_reset(tsk)) {
- bcom_task_free(tsk);
- return NULL;
- }
-
- return tsk;
-}
-EXPORT_SYMBOL_GPL(bcom_fec_tx_init);
-
-int
-bcom_fec_tx_reset(struct bcom_task *tsk)
-{
- struct bcom_fec_priv *priv = tsk->priv;
- struct bcom_fec_tx_var *var;
- struct bcom_fec_tx_inc *inc;
-
- /* Shutdown the task */
- bcom_disable_task(tsk->tasknum);
-
- /* Reset the microcode */
- var = (struct bcom_fec_tx_var *) bcom_task_var(tsk->tasknum);
- inc = (struct bcom_fec_tx_inc *) bcom_task_inc(tsk->tasknum);
-
- if (bcom_load_image(tsk->tasknum, bcom_fec_tx_task))
- return -1;
-
- var->enable = bcom_eng->regs_base +
- offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
- var->fifo = (u32) priv->fifo;
- var->DRD = bcom_sram_va2pa(self_modified_drd(tsk->tasknum));
- var->bd_base = tsk->bd_pa;
- var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
- var->bd_start = tsk->bd_pa;
-
- inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */
- inc->incr_src = sizeof(u32); /* task image, but we stick */
- inc->incr_src_ma= sizeof(u8); /* to the official ones */
-
- /* Reset the BDs */
- tsk->index = 0;
- tsk->outdex = 0;
-
- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
-
- /* Configure some stuff */
- bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA);
- bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
-
- out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_TX], BCOM_IPR_FEC_TX);
-
- out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(bcom_fec_tx_reset);
-
-void
-bcom_fec_tx_release(struct bcom_task *tsk)
-{
- /* Nothing special for the FEC tasks */
- bcom_task_free(tsk);
-}
-EXPORT_SYMBOL_GPL(bcom_fec_tx_release);
-
-
-MODULE_DESCRIPTION("BestComm FEC tasks driver");
-MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
-MODULE_LICENSE("GPL v2");
-
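For reference, a sketch of how an MPC5200 Ethernet driver obtains and starts these tasks; the FIFO addresses, ring length and buffer size below are illustrative:

/* Illustrative only */
static int sample_fec_dma_init(phys_addr_t fifo_rx, phys_addr_t fifo_tx)
{
	struct bcom_task *rx_dmatsk, *tx_dmatsk;

	rx_dmatsk = bcom_fec_rx_init(64, fifo_rx, 2048);
	tx_dmatsk = bcom_fec_tx_init(64, fifo_tx);
	if (!rx_dmatsk || !tx_dmatsk)
		return -ENOMEM;	/* real code would free whichever init succeeded */

	/* buffer descriptors are queued via the bestcomm core, then the
	 * tasks are started with the helpers from bestcomm_priv.h */
	bcom_enable_task(rx_dmatsk->tasknum);
	bcom_enable_task(tx_dmatsk->tasknum);
	return 0;
}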
diff --git a/arch/powerpc/sysdev/bestcomm/fec.h b/arch/powerpc/sysdev/bestcomm/fec.h
deleted file mode 100644
index ee565d9..0000000
--- a/arch/powerpc/sysdev/bestcomm/fec.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Header for Bestcomm FEC tasks driver
- *
- *
- * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003-2004 MontaVista, Software, Inc.
- * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __BESTCOMM_FEC_H__
-#define __BESTCOMM_FEC_H__
-
-
-struct bcom_fec_bd {
- u32 status;
- u32 skb_pa;
-};
-
-#define BCOM_FEC_TX_BD_TFD 0x08000000ul /* transmit frame done */
-#define BCOM_FEC_TX_BD_TC 0x04000000ul /* transmit CRC */
-#define BCOM_FEC_TX_BD_ABC 0x02000000ul /* append bad CRC */
-
-#define BCOM_FEC_RX_BD_L 0x08000000ul /* buffer is last in frame */
-#define BCOM_FEC_RX_BD_BC 0x00800000ul /* DA is broadcast */
-#define BCOM_FEC_RX_BD_MC 0x00400000ul /* DA is multicast and not broadcast */
-#define BCOM_FEC_RX_BD_LG 0x00200000ul /* Rx frame length violation */
-#define BCOM_FEC_RX_BD_NO 0x00100000ul /* Rx non-octet aligned frame */
-#define BCOM_FEC_RX_BD_CR 0x00040000ul /* Rx CRC error */
-#define BCOM_FEC_RX_BD_OV 0x00020000ul /* overrun */
-#define BCOM_FEC_RX_BD_TR 0x00010000ul /* Rx frame truncated */
-#define BCOM_FEC_RX_BD_LEN_MASK 0x000007fful /* mask for length of received frame */
-#define BCOM_FEC_RX_BD_ERRORS (BCOM_FEC_RX_BD_LG | BCOM_FEC_RX_BD_NO | \
- BCOM_FEC_RX_BD_CR | BCOM_FEC_RX_BD_OV | BCOM_FEC_RX_BD_TR)
-
-
-extern struct bcom_task *
-bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize);
-
-extern int
-bcom_fec_rx_reset(struct bcom_task *tsk);
-
-extern void
-bcom_fec_rx_release(struct bcom_task *tsk);
-
-
-extern struct bcom_task *
-bcom_fec_tx_init(int queue_len, phys_addr_t fifo);
-
-extern int
-bcom_fec_tx_reset(struct bcom_task *tsk);
-
-extern void
-bcom_fec_tx_release(struct bcom_task *tsk);
-
-
-#endif /* __BESTCOMM_FEC_H__ */
-
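A short illustration of how the RX status bits above are interpreted once a descriptor completes:

/* Illustrative only: 'bd' is a completed descriptor from the RX ring */
static void sample_fec_rx_complete(struct bcom_fec_bd *bd)
{
	u32 status = bd->status;
	unsigned int len = status & BCOM_FEC_RX_BD_LEN_MASK;

	if (status & BCOM_FEC_RX_BD_ERRORS) {
		/* length/alignment/CRC/overrun/truncation: drop and recycle */
	} else if (status & BCOM_FEC_RX_BD_L) {
		/* last buffer of the frame: pass 'len' bytes up the stack */
		pr_debug("rx frame, %u bytes\n", len);
	}
}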
diff --git a/arch/powerpc/sysdev/bestcomm/gen_bd.c b/arch/powerpc/sysdev/bestcomm/gen_bd.c
deleted file mode 100644
index e0a53e3..0000000
--- a/arch/powerpc/sysdev/bestcomm/gen_bd.c
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Driver for MPC52xx processor BestComm General Buffer Descriptor
- *
- * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2006 AppSpec Computer Technologies Corp.
- * Jeff Gibbons <jeff.gibbons@appspec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-
-#include <asm/mpc52xx.h>
-#include <asm/mpc52xx_psc.h>
-
-#include "bestcomm.h"
-#include "bestcomm_priv.h"
-#include "gen_bd.h"
-
-
-/* ======================================================================== */
-/* Task image/var/inc */
-/* ======================================================================== */
-
-/* gen_bd tasks images */
-extern u32 bcom_gen_bd_rx_task[];
-extern u32 bcom_gen_bd_tx_task[];
-
-/* rx task vars that need to be set before enabling the task */
-struct bcom_gen_bd_rx_var {
- u32 enable; /* (u16*) address of task's control register */
- u32 fifo; /* (u32*) address of gen_bd's fifo */
- u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
- u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
- u32 bd_start; /* (struct bcom_bd*) current bd */
- u32 buffer_size; /* size of receive buffer */
-};
-
-/* rx task incs that need to be set before enabling the task */
-struct bcom_gen_bd_rx_inc {
- u16 pad0;
- s16 incr_bytes;
- u16 pad1;
- s16 incr_dst;
-};
-
-/* tx task vars that need to be set before enabling the task */
-struct bcom_gen_bd_tx_var {
- u32 fifo; /* (u32*) address of gen_bd's fifo */
- u32 enable; /* (u16*) address of task's control register */
- u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
- u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
- u32 bd_start; /* (struct bcom_bd*) current bd */
- u32 buffer_size; /* set by uCode for each packet */
-};
-
-/* tx task incs that need to be set before enabling the task */
-struct bcom_gen_bd_tx_inc {
- u16 pad0;
- s16 incr_bytes;
- u16 pad1;
- s16 incr_src;
- u16 pad2;
- s16 incr_src_ma;
-};
-
-/* private structure */
-struct bcom_gen_bd_priv {
- phys_addr_t fifo;
- int initiator;
- int ipr;
- int maxbufsize;
-};
-
-
-/* ======================================================================== */
-/* Task support code */
-/* ======================================================================== */
-
-struct bcom_task *
-bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo,
- int initiator, int ipr, int maxbufsize)
-{
- struct bcom_task *tsk;
- struct bcom_gen_bd_priv *priv;
-
- tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
- sizeof(struct bcom_gen_bd_priv));
- if (!tsk)
- return NULL;
-
- tsk->flags = BCOM_FLAGS_NONE;
-
- priv = tsk->priv;
- priv->fifo = fifo;
- priv->initiator = initiator;
- priv->ipr = ipr;
- priv->maxbufsize = maxbufsize;
-
- if (bcom_gen_bd_rx_reset(tsk)) {
- bcom_task_free(tsk);
- return NULL;
- }
-
- return tsk;
-}
-EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_init);
-
-int
-bcom_gen_bd_rx_reset(struct bcom_task *tsk)
-{
- struct bcom_gen_bd_priv *priv = tsk->priv;
- struct bcom_gen_bd_rx_var *var;
- struct bcom_gen_bd_rx_inc *inc;
-
- /* Shutdown the task */
- bcom_disable_task(tsk->tasknum);
-
- /* Reset the microcode */
- var = (struct bcom_gen_bd_rx_var *) bcom_task_var(tsk->tasknum);
- inc = (struct bcom_gen_bd_rx_inc *) bcom_task_inc(tsk->tasknum);
-
- if (bcom_load_image(tsk->tasknum, bcom_gen_bd_rx_task))
- return -1;
-
- var->enable = bcom_eng->regs_base +
- offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
- var->fifo = (u32) priv->fifo;
- var->bd_base = tsk->bd_pa;
- var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
- var->bd_start = tsk->bd_pa;
- var->buffer_size = priv->maxbufsize;
-
- inc->incr_bytes = -(s16)sizeof(u32);
- inc->incr_dst = sizeof(u32);
-
- /* Reset the BDs */
- tsk->index = 0;
- tsk->outdex = 0;
-
- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
-
- /* Configure some stuff */
- bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA);
- bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
-
- out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
- bcom_set_initiator(tsk->tasknum, priv->initiator);
-
- out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_reset);
-
-void
-bcom_gen_bd_rx_release(struct bcom_task *tsk)
-{
- /* Nothing special for the GenBD tasks */
- bcom_task_free(tsk);
-}
-EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_release);
-
-
-extern struct bcom_task *
-bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo,
- int initiator, int ipr)
-{
- struct bcom_task *tsk;
- struct bcom_gen_bd_priv *priv;
-
- tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
- sizeof(struct bcom_gen_bd_priv));
- if (!tsk)
- return NULL;
-
- tsk->flags = BCOM_FLAGS_NONE;
-
- priv = tsk->priv;
- priv->fifo = fifo;
- priv->initiator = initiator;
- priv->ipr = ipr;
-
- if (bcom_gen_bd_tx_reset(tsk)) {
- bcom_task_free(tsk);
- return NULL;
- }
-
- return tsk;
-}
-EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_init);
-
-int
-bcom_gen_bd_tx_reset(struct bcom_task *tsk)
-{
- struct bcom_gen_bd_priv *priv = tsk->priv;
- struct bcom_gen_bd_tx_var *var;
- struct bcom_gen_bd_tx_inc *inc;
-
- /* Shutdown the task */
- bcom_disable_task(tsk->tasknum);
-
- /* Reset the microcode */
- var = (struct bcom_gen_bd_tx_var *) bcom_task_var(tsk->tasknum);
- inc = (struct bcom_gen_bd_tx_inc *) bcom_task_inc(tsk->tasknum);
-
- if (bcom_load_image(tsk->tasknum, bcom_gen_bd_tx_task))
- return -1;
-
- var->enable = bcom_eng->regs_base +
- offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
- var->fifo = (u32) priv->fifo;
- var->bd_base = tsk->bd_pa;
- var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
- var->bd_start = tsk->bd_pa;
-
- inc->incr_bytes = -(s16)sizeof(u32);
- inc->incr_src = sizeof(u32);
- inc->incr_src_ma = sizeof(u8);
-
- /* Reset the BDs */
- tsk->index = 0;
- tsk->outdex = 0;
-
- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
-
- /* Configure some stuff */
- bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA);
- bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
-
- out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
- bcom_set_initiator(tsk->tasknum, priv->initiator);
-
- out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_reset);
-
-void
-bcom_gen_bd_tx_release(struct bcom_task *tsk)
-{
- /* Nothing special for the GenBD tasks */
- bcom_task_free(tsk);
-}
-EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_release);
-
-/* ---------------------------------------------------------------------
- * PSC support code
- */
-
-/**
- * bcom_psc_parameters - Bestcomm initialization value table for PSC devices
- *
- * This structure is only used internally. It is a lookup table for PSC
- * specific parameters to bestcomm tasks.
- */
-static struct bcom_psc_params {
- int rx_initiator;
- int rx_ipr;
- int tx_initiator;
- int tx_ipr;
-} bcom_psc_params[] = {
- [0] = {
- .rx_initiator = BCOM_INITIATOR_PSC1_RX,
- .rx_ipr = BCOM_IPR_PSC1_RX,
- .tx_initiator = BCOM_INITIATOR_PSC1_TX,
- .tx_ipr = BCOM_IPR_PSC1_TX,
- },
- [1] = {
- .rx_initiator = BCOM_INITIATOR_PSC2_RX,
- .rx_ipr = BCOM_IPR_PSC2_RX,
- .tx_initiator = BCOM_INITIATOR_PSC2_TX,
- .tx_ipr = BCOM_IPR_PSC2_TX,
- },
- [2] = {
- .rx_initiator = BCOM_INITIATOR_PSC3_RX,
- .rx_ipr = BCOM_IPR_PSC3_RX,
- .tx_initiator = BCOM_INITIATOR_PSC3_TX,
- .tx_ipr = BCOM_IPR_PSC3_TX,
- },
- [3] = {
- .rx_initiator = BCOM_INITIATOR_PSC4_RX,
- .rx_ipr = BCOM_IPR_PSC4_RX,
- .tx_initiator = BCOM_INITIATOR_PSC4_TX,
- .tx_ipr = BCOM_IPR_PSC4_TX,
- },
- [4] = {
- .rx_initiator = BCOM_INITIATOR_PSC5_RX,
- .rx_ipr = BCOM_IPR_PSC5_RX,
- .tx_initiator = BCOM_INITIATOR_PSC5_TX,
- .tx_ipr = BCOM_IPR_PSC5_TX,
- },
- [5] = {
- .rx_initiator = BCOM_INITIATOR_PSC6_RX,
- .rx_ipr = BCOM_IPR_PSC6_RX,
- .tx_initiator = BCOM_INITIATOR_PSC6_TX,
- .tx_ipr = BCOM_IPR_PSC6_TX,
- },
-};
-
-/**
- * bcom_psc_gen_bd_rx_init - Allocate a receive bcom_task for a PSC port
- * @psc_num: Number of the PSC to allocate a task for
- * @queue_len: number of buffer descriptors to allocate for the task
- * @fifo: physical address of FIFO register
- * @maxbufsize: Maximum receive data size in bytes.
- *
- * Allocate a bestcomm task structure for receiving data from a PSC.
- */
-struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len,
- phys_addr_t fifo, int maxbufsize)
-{
- if (psc_num >= MPC52xx_PSC_MAXNUM)
- return NULL;
-
- return bcom_gen_bd_rx_init(queue_len, fifo,
- bcom_psc_params[psc_num].rx_initiator,
- bcom_psc_params[psc_num].rx_ipr,
- maxbufsize);
-}
-EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_rx_init);
-
-/**
- * bcom_psc_gen_bd_tx_init - Allocate a transmit bcom_task for a PSC port
- * @psc_num: Number of the PSC to allocate a task for
- * @queue_len: number of buffer descriptors to allocate for the task
- * @fifo: physical address of FIFO register
- *
- * Allocate a bestcomm task structure for transmitting data to a PSC.
- */
-struct bcom_task *
-bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, phys_addr_t fifo)
-{
- struct psc;
- return bcom_gen_bd_tx_init(queue_len, fifo,
- bcom_psc_params[psc_num].tx_initiator,
- bcom_psc_params[psc_num].tx_ipr);
-}
-EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_tx_init);
-
-
-MODULE_DESCRIPTION("BestComm General Buffer Descriptor tasks driver");
-MODULE_AUTHOR("Jeff Gibbons <jeff.gibbons@appspec.com>");
-MODULE_LICENSE("GPL v2");
-
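For reference, the PSC wrappers above are used by the UART/SPI side roughly as follows; index 0 selects the PSC1 entry of the parameter table, and the FIFO address and sizes are illustrative:

/* Illustrative only */
static int sample_psc1_dma_init(phys_addr_t fifo)
{
	struct bcom_task *rx, *tx;

	rx = bcom_psc_gen_bd_rx_init(0, 32, fifo, 512);
	tx = bcom_psc_gen_bd_tx_init(0, 32, fifo);
	if (!rx || !tx)
		return -ENOMEM;	/* real code would release the partial state */

	bcom_enable_task(rx->tasknum);
	bcom_enable_task(tx->tasknum);
	return 0;
}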
diff --git a/arch/powerpc/sysdev/bestcomm/gen_bd.h b/arch/powerpc/sysdev/bestcomm/gen_bd.h
deleted file mode 100644
index de47260..0000000
--- a/arch/powerpc/sysdev/bestcomm/gen_bd.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Header for Bestcomm General Buffer Descriptor tasks driver
- *
- *
- * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2006 AppSpec Computer Technologies Corp.
- * Jeff Gibbons <jeff.gibbons@appspec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- *
- */
-
-#ifndef __BESTCOMM_GEN_BD_H__
-#define __BESTCOMM_GEN_BD_H__
-
-struct bcom_gen_bd {
- u32 status;
- u32 buf_pa;
-};
-
-
-extern struct bcom_task *
-bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo,
- int initiator, int ipr, int maxbufsize);
-
-extern int
-bcom_gen_bd_rx_reset(struct bcom_task *tsk);
-
-extern void
-bcom_gen_bd_rx_release(struct bcom_task *tsk);
-
-
-extern struct bcom_task *
-bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo,
- int initiator, int ipr);
-
-extern int
-bcom_gen_bd_tx_reset(struct bcom_task *tsk);
-
-extern void
-bcom_gen_bd_tx_release(struct bcom_task *tsk);
-
-
-/* PSC support utility wrappers */
-struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len,
- phys_addr_t fifo, int maxbufsize);
-struct bcom_task * bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len,
- phys_addr_t fifo);
-#endif /* __BESTCOMM_GEN_BD_H__ */
-
diff --git a/arch/powerpc/sysdev/bestcomm/sram.c b/arch/powerpc/sysdev/bestcomm/sram.c
deleted file mode 100644
index b6db23e..0000000
--- a/arch/powerpc/sysdev/bestcomm/sram.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Simple memory allocator for on-board SRAM
- *
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/of.h>
-
-#include <asm/io.h>
-#include <asm/mmu.h>
-
-#include "sram.h"
-
-
-/* Struct keeping our 'state' */
-struct bcom_sram *bcom_sram = NULL;
-EXPORT_SYMBOL_GPL(bcom_sram); /* needed for inline functions */
-
-
-/* ======================================================================== */
-/* Public API */
-/* ======================================================================== */
-/* DO NOT USE in interrupts, if needed in irq handler, we should use the
- _irqsave version of the spin_locks */
-
-int bcom_sram_init(struct device_node *sram_node, char *owner)
-{
- int rv;
- const u32 *regaddr_p;
- u64 regaddr64, size64;
- unsigned int psize;
-
- /* Create our state struct */
- if (bcom_sram) {
- printk(KERN_ERR "%s: bcom_sram_init: "
- "Already initialized !\n", owner);
- return -EBUSY;
- }
-
- bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL);
- if (!bcom_sram) {
- printk(KERN_ERR "%s: bcom_sram_init: "
- "Couldn't allocate internal state !\n", owner);
- return -ENOMEM;
- }
-
- /* Get address and size of the sram */
- regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
- if (!regaddr_p) {
- printk(KERN_ERR "%s: bcom_sram_init: "
- "Invalid device node !\n", owner);
- rv = -EINVAL;
- goto error_free;
- }
-
- regaddr64 = of_translate_address(sram_node, regaddr_p);
-
- bcom_sram->base_phys = (phys_addr_t) regaddr64;
- bcom_sram->size = (unsigned int) size64;
-
- /* Request region */
- if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
- printk(KERN_ERR "%s: bcom_sram_init: "
- "Couldn't request region !\n", owner);
- rv = -EBUSY;
- goto error_free;
- }
-
- /* Map SRAM */
- /* sram is not really __iomem */
- bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size);
-
- if (!bcom_sram->base_virt) {
- printk(KERN_ERR "%s: bcom_sram_init: "
- "Map error SRAM zone 0x%08lx (0x%0x)!\n",
- owner, (long)bcom_sram->base_phys, bcom_sram->size );
- rv = -ENOMEM;
- goto error_release;
- }
-
- /* Create an rheap (defaults to 32 bits word alignment) */
- bcom_sram->rh = rh_create(4);
-
- /* Attach the free zones */
-#if 0
- /* Currently disabled ... for future use only */
- reg_addr_p = of_get_property(sram_node, "available", &psize);
-#else
- regaddr_p = NULL;
- psize = 0;
-#endif
-
- if (!regaddr_p || !psize) {
- /* Attach the whole zone */
- rh_attach_region(bcom_sram->rh, 0, bcom_sram->size);
- } else {
- /* Attach each zone independently */
- while (psize >= 2 * sizeof(u32)) {
- phys_addr_t zbase = of_translate_address(sram_node, regaddr_p);
- rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]);
- regaddr_p += 2;
- psize -= 2 * sizeof(u32);
- }
- }
-
- /* Init our spinlock */
- spin_lock_init(&bcom_sram->lock);
-
- return 0;
-
-error_release:
- release_mem_region(bcom_sram->base_phys, bcom_sram->size);
-error_free:
- kfree(bcom_sram);
- bcom_sram = NULL;
-
- return rv;
-}
-EXPORT_SYMBOL_GPL(bcom_sram_init);
-
-void bcom_sram_cleanup(void)
-{
- /* Free resources */
- if (bcom_sram) {
- rh_destroy(bcom_sram->rh);
- iounmap((void __iomem *)bcom_sram->base_virt);
- release_mem_region(bcom_sram->base_phys, bcom_sram->size);
- kfree(bcom_sram);
- bcom_sram = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(bcom_sram_cleanup);
-
-void* bcom_sram_alloc(int size, int align, phys_addr_t *phys)
-{
- unsigned long offset;
-
- spin_lock(&bcom_sram->lock);
- offset = rh_alloc_align(bcom_sram->rh, size, align, NULL);
- spin_unlock(&bcom_sram->lock);
-
- if (IS_ERR_VALUE(offset))
- return NULL;
-
- *phys = bcom_sram->base_phys + offset;
- return bcom_sram->base_virt + offset;
-}
-EXPORT_SYMBOL_GPL(bcom_sram_alloc);
-
-void bcom_sram_free(void *ptr)
-{
- unsigned long offset;
-
- if (!ptr)
- return;
-
- offset = ptr - bcom_sram->base_virt;
-
- spin_lock(&bcom_sram->lock);
- rh_free(bcom_sram->rh, offset);
- spin_unlock(&bcom_sram->lock);
-}
-EXPORT_SYMBOL_GPL(bcom_sram_free);
-
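A minimal usage sketch of this allocator, reusing the context-save sizes defined in bestcomm_priv.h; purely illustrative:

/* Illustrative only */
static int sample_ctx_alloc(void)
{
	phys_addr_t pa;
	void *va;

	va = bcom_sram_alloc(BCOM_CTX_SIZE, BCOM_CTX_ALIGN, &pa);
	if (!va)
		return -ENOMEM;

	memset(va, 0x00, BCOM_CTX_SIZE);
	/* hand 'pa' to the engine; 'va' remains valid for CPU access */

	bcom_sram_free(va);
	return 0;
}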
diff --git a/arch/powerpc/sysdev/bestcomm/sram.h b/arch/powerpc/sysdev/bestcomm/sram.h
deleted file mode 100644
index b6d6689..0000000
--- a/arch/powerpc/sysdev/bestcomm/sram.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Handling of a sram zone for bestcomm
- *
- *
- * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __BESTCOMM_SRAM_H__
-#define __BESTCOMM_SRAM_H__
-
-#include <asm/rheap.h>
-#include <asm/mmu.h>
-#include <linux/spinlock.h>
-
-
-/* Structure used internally */
- /* The internals are here for the inline functions
- * sake, certainly not for the user to mess with !
- */
-struct bcom_sram {
- phys_addr_t base_phys;
- void *base_virt;
- unsigned int size;
- rh_info_t *rh;
- spinlock_t lock;
-};
-
-extern struct bcom_sram *bcom_sram;
-
-
-/* Public API */
-extern int bcom_sram_init(struct device_node *sram_node, char *owner);
-extern void bcom_sram_cleanup(void);
-
-extern void* bcom_sram_alloc(int size, int align, phys_addr_t *phys);
-extern void bcom_sram_free(void *ptr);
-
-static inline phys_addr_t bcom_sram_va2pa(void *va) {
- return bcom_sram->base_phys +
- (unsigned long)(va - bcom_sram->base_virt);
-}
-
-static inline void *bcom_sram_pa2va(phys_addr_t pa) {
- return bcom_sram->base_virt +
- (unsigned long)(pa - bcom_sram->base_phys);
-}
-
-
-#endif /* __BESTCOMM_SRAM_H__ */
-
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index 6e0e100..9cd0e60 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -81,7 +81,7 @@ int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
ev_int_set_config(src, config, prio, cpuid);
spin_unlock_irqrestore(&ehv_pic_lock, flags);
- return 0;
+ return IRQ_SET_MASK_OK;
}
static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index b29a4ab..b346247 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -442,6 +442,8 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
return 0;
}
+static struct lock_class_key fsl_msi_irq_class;
+
static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
int offset, int irq_index)
{
@@ -460,7 +462,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
dev_err(&dev->dev, "No memory for MSI cascade data\n");
return -ENOMEM;
}
-
+ irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
msi->msi_virqs[irq_index] = virt_msir;
cascade_data->index = offset;
cascade_data->msi_data = msi;
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index d213c83..74064e1 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -102,22 +102,14 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
return indirect_read_config(bus, devfn, offset, len, val);
}
-static struct pci_ops fsl_indirect_pci_ops =
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+
+static struct pci_ops fsl_indirect_pcie_ops =
{
.read = fsl_indirect_read_config,
.write = indirect_write_config,
};
-static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
- resource_size_t cfg_addr,
- resource_size_t cfg_data, u32 flags)
-{
- setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
- hose->ops = &fsl_indirect_pci_ops;
-}
-
-#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
-
#define MAX_PHYS_ADDR_BITS 40
static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
@@ -158,7 +150,7 @@ static int setup_one_atmu(struct ccsr_pci __iomem *pci,
flags |= 0x10000000; /* enable relaxed ordering */
for (i = 0; size > 0; i++) {
- unsigned int bits = min(__ilog2_u64(size),
+ unsigned int bits = min(ilog2(size),
__ffs(pci_addr | phys_addr));
if (index + i >= 5)
@@ -183,7 +175,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
struct ccsr_pci __iomem *pci = hose->private_data;
int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
u64 mem, sz, paddr_hi = 0;
- u64 paddr_lo = ULLONG_MAX;
+ u64 offset = 0, paddr_lo = ULLONG_MAX;
u32 pcicsrbar = 0, pcicsrbar_sz;
u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
@@ -246,8 +238,9 @@ static void setup_pci_atmu(struct pci_controller *hose)
paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
- n = setup_one_atmu(pci, j, &hose->mem_resources[i],
- hose->pci_mem_offset);
+ /* We assume all memory resources have the same offset */
+ offset = hose->mem_offset[i];
+ n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
if (n < 0 || j >= 5) {
pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
@@ -271,14 +264,14 @@ static void setup_pci_atmu(struct pci_controller *hose)
out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
/* Enable, IO R/W */
out_be32(&pci->pow[j].powar, 0x80088000
- | (__ilog2_u64(hose->io_resource.end
+ | (ilog2(hose->io_resource.end
- hose->io_resource.start + 1) - 1));
}
}
/* convert to pci address space */
- paddr_hi -= hose->pci_mem_offset;
- paddr_lo -= hose->pci_mem_offset;
+ paddr_hi -= offset;
+ paddr_lo -= offset;
if (paddr_hi == paddr_lo) {
pr_err("%s: No outbound window space\n", name);
@@ -336,7 +329,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
}
sz = min(mem, paddr_lo);
- mem_log = __ilog2_u64(sz);
+ mem_log = ilog2(sz);
/* PCIe can overmap inbound & outbound since RX & TX are separated */
#ifdef CONFIG_PPC_QEMU_E500
@@ -369,7 +362,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
* SWIOTLB and access the full range of memory
*/
if (sz != mem) {
- mem_log = __ilog2_u64(mem);
+ mem_log = ilog2(mem);
/* Size window up if we dont fit in exact power-of-2 */
if ((1ull << mem_log) != mem)
@@ -406,7 +399,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
sz -= 1ull << mem_log;
if (sz) {
- mem_log = __ilog2_u64(sz);
+ mem_log = ilog2(sz);
piwar |= (mem_log - 1);
out_be32(&pci->piw[win_idx].pitar, paddr >> 12);
@@ -547,13 +540,15 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
if (!hose->private_data)
goto no_bridge;
- fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
- PPC_INDIRECT_TYPE_BIG_ENDIAN);
+ setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
+ PPC_INDIRECT_TYPE_BIG_ENDIAN);
if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
+ /* use fsl_indirect_read_config for PCIe */
+ hose->ops = &fsl_indirect_pcie_ops;
/* For PCIE read HEADER_TYPE to identify controler mode */
early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
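The switch from __ilog2_u64() to ilog2() above does not alter the window-splitting arithmetic: each outbound ATMU must be a power-of-two size that is also aligned on both the PCI and physical address, hence min(ilog2(size), __ffs(pci_addr | phys_addr)). As a worked example, a 0x30000000-byte resource at address 0x90000000 gives ilog2 = 29 but __ffs = 28, so the loop first programs a 2^28 (256 MiB) window and then, at 0xa0000000 on the next iteration, a 2^29 (512 MiB) window for the remainder.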
diff --git a/arch/powerpc/sysdev/mpc5xxx_clocks.c b/arch/powerpc/sysdev/mpc5xxx_clocks.c
index 96f815a..5492dc5 100644
--- a/arch/powerpc/sysdev/mpc5xxx_clocks.c
+++ b/arch/powerpc/sysdev/mpc5xxx_clocks.c
@@ -9,9 +9,9 @@
#include <linux/kernel.h>
#include <linux/of_platform.h>
#include <linux/export.h>
+#include <asm/mpc5xxx.h>
-unsigned int
-mpc5xxx_get_bus_frequency(struct device_node *node)
+unsigned long mpc5xxx_get_bus_frequency(struct device_node *node)
{
struct device_node *np;
const unsigned int *p_bus_freq = NULL;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index c8e41c0..e500517 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -836,7 +836,7 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
mpic_physmask(mask));
}
- return 0;
+ return IRQ_SET_MASK_OK;
}
static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
@@ -1016,8 +1016,12 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq,
if (hw == mpic->spurious_vec)
return -EINVAL;
- if (mpic->protected && test_bit(hw, mpic->protected))
- return -EINVAL;
+ if (mpic->protected && test_bit(hw, mpic->protected)) {
+ pr_warning("mpic: Mapping of source 0x%x failed, "
+ "source protected by firmware !\n",\
+ (unsigned int)hw);
+ return -EPERM;
+ }
#ifdef CONFIG_SMP
else if (hw >= mpic->ipi_vecs[0]) {
@@ -1044,8 +1048,12 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq,
if (mpic_map_error_int(mpic, virq, hw))
return 0;
- if (hw >= mpic->num_sources)
+ if (hw >= mpic->num_sources) {
+ pr_warning("mpic: Mapping of source 0x%x failed, "
+ "source out of range !\n",\
+ (unsigned int)hw);
return -EINVAL;
+ }
mpic_msi_reserve_hwirq(mpic, hw);
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 0f6af41..4a25c26 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -214,15 +214,27 @@ static struct platform_device * __init mv64x60_eth_register_shared_pdev(
struct device_node *np, int id)
{
struct platform_device *pdev;
- struct resource r[1];
+ struct resource r[2];
int err;
err = of_address_to_resource(np, 0, &r[0]);
if (err)
return ERR_PTR(err);
+ /* register an orion mdio bus driver */
+ r[1].start = r[0].start + 0x4;
+ r[1].end = r[0].start + 0x84 - 1;
+ r[1].flags = IORESOURCE_MEM;
+
+ if (id == 0) {
+ pdev = platform_device_register_simple("orion-mdio", -1, &r[1], 1);
+ if (!pdev)
+ return pdev;
+ }
+
pdev = platform_device_register_simple(MV643XX_ETH_SHARED_NAME, id,
- r, 1);
+ &r[0], 1);
+
return pdev;
}
diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/sysdev/ppc4xx_ocm.c
new file mode 100644
index 0000000..1b15f93
--- /dev/null
+++ b/arch/powerpc/sysdev/ppc4xx_ocm.c
@@ -0,0 +1,415 @@
+/*
+ * PowerPC 4xx OCM memory allocation support
+ *
+ * (C) Copyright 2009, Applied Micro Circuits Corporation
+ * Victor Gallardo (vgallardo@amcc.com)
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <asm/rheap.h>
+#include <asm/ppc4xx_ocm.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#define OCM_DISABLED 0
+#define OCM_ENABLED 1
+
+struct ocm_block {
+ struct list_head list;
+ void __iomem *addr;
+ int size;
+ const char *owner;
+};
+
+/* non-cached or cached region */
+struct ocm_region {
+ phys_addr_t phys;
+ void __iomem *virt;
+
+ int memtotal;
+ int memfree;
+
+ rh_info_t *rh;
+ struct list_head list;
+};
+
+struct ocm_info {
+ int index;
+ int status;
+ int ready;
+
+ phys_addr_t phys;
+
+ int alignment;
+ int memtotal;
+ int cache_size;
+
+ struct ocm_region nc; /* non-cached region */
+ struct ocm_region c; /* cached region */
+};
+
+static struct ocm_info *ocm_nodes;
+static int ocm_count;
+
+static struct ocm_info *ocm_get_node(unsigned int index)
+{
+ if (index >= ocm_count) {
+ printk(KERN_ERR "PPC4XX OCM: invalid index");
+ return NULL;
+ }
+
+ return &ocm_nodes[index];
+}
+
+static int ocm_free_region(struct ocm_region *ocm_reg, const void *addr)
+{
+ struct ocm_block *blk, *tmp;
+ unsigned long offset;
+
+ if (!ocm_reg->virt)
+ return 0;
+
+ list_for_each_entry_safe(blk, tmp, &ocm_reg->list, list) {
+ if (blk->addr == addr) {
+ offset = addr - ocm_reg->virt;
+ ocm_reg->memfree += blk->size;
+ rh_free(ocm_reg->rh, offset);
+ list_del(&blk->list);
+ kfree(blk);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void __init ocm_init_node(int count, struct device_node *node)
+{
+ struct ocm_info *ocm;
+
+ const unsigned int *cell_index;
+ const unsigned int *cache_size;
+ int len;
+
+ struct resource rsrc;
+ int ioflags;
+
+ ocm = ocm_get_node(count);
+
+ cell_index = of_get_property(node, "cell-index", &len);
+ if (!cell_index) {
+ printk(KERN_ERR "PPC4XX OCM: missing cell-index property");
+ return;
+ }
+ ocm->index = *cell_index;
+
+ if (of_device_is_available(node))
+ ocm->status = OCM_ENABLED;
+
+ cache_size = of_get_property(node, "cached-region-size", &len);
+ if (cache_size)
+ ocm->cache_size = *cache_size;
+
+ if (of_address_to_resource(node, 0, &rsrc)) {
+ printk(KERN_ERR "PPC4XX OCM%d: could not get resource address\n",
+ ocm->index);
+ return;
+ }
+
+ ocm->phys = rsrc.start;
+ ocm->memtotal = (rsrc.end - rsrc.start + 1);
+
+ printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (%s)\n",
+ ocm->index, ocm->memtotal,
+ (ocm->status == OCM_DISABLED) ? "disabled" : "enabled");
+
+ if (ocm->status == OCM_DISABLED)
+ return;
+
+ /* request region */
+
+ if (!request_mem_region(ocm->phys, ocm->memtotal, "ppc4xx_ocm")) {
+ printk(KERN_ERR "PPC4XX OCM%d: could not request region\n",
+ ocm->index);
+ return;
+ }
+
+ /* Configure non-cached and cached regions */
+
+ ocm->nc.phys = ocm->phys;
+ ocm->nc.memtotal = ocm->memtotal - ocm->cache_size;
+ ocm->nc.memfree = ocm->nc.memtotal;
+
+ ocm->c.phys = ocm->phys + ocm->nc.memtotal;
+ ocm->c.memtotal = ocm->cache_size;
+ ocm->c.memfree = ocm->c.memtotal;
+
+ if (ocm->nc.memtotal == 0)
+ ocm->nc.phys = 0;
+
+ if (ocm->c.memtotal == 0)
+ ocm->c.phys = 0;
+
+ printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (non-cached)\n",
+ ocm->index, ocm->nc.memtotal);
+
+ printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (cached)\n",
+ ocm->index, ocm->c.memtotal);
+
+ /* ioremap the non-cached region */
+ if (ocm->nc.memtotal) {
+ ioflags = _PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_EXEC;
+ ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal,
+ ioflags);
+
+ if (!ocm->nc.virt) {
+ printk(KERN_ERR
+ "PPC4XX OCM%d: failed to ioremap non-cached memory\n",
+ ocm->index);
+ ocm->nc.memfree = 0;
+ return;
+ }
+ }
+
+ /* ioremap the cached region */
+
+ if (ocm->c.memtotal) {
+ ioflags = _PAGE_EXEC;
+ ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal,
+ ioflags);
+
+ if (!ocm->c.virt) {
+ printk(KERN_ERR
+ "PPC4XX OCM%d: failed to ioremap cached memory\n",
+ ocm->index);
+ ocm->c.memfree = 0;
+ return;
+ }
+ }
+
+ /* Create Remote Heaps */
+
+ ocm->alignment = 4; /* default 4 byte alignment */
+
+ if (ocm->nc.virt) {
+ ocm->nc.rh = rh_create(ocm->alignment);
+ rh_attach_region(ocm->nc.rh, 0, ocm->nc.memtotal);
+ }
+
+ if (ocm->c.virt) {
+ ocm->c.rh = rh_create(ocm->alignment);
+ rh_attach_region(ocm->c.rh, 0, ocm->c.memtotal);
+ }
+
+ INIT_LIST_HEAD(&ocm->nc.list);
+ INIT_LIST_HEAD(&ocm->c.list);
+
+ ocm->ready = 1;
+
+ return;
+}
+
+static int ocm_debugfs_show(struct seq_file *m, void *v)
+{
+ struct ocm_block *blk, *tmp;
+ unsigned int i;
+
+ for (i = 0; i < ocm_count; i++) {
+ struct ocm_info *ocm = ocm_get_node(i);
+
+ if (!ocm || !ocm->ready)
+ continue;
+
+ seq_printf(m, "PPC4XX OCM : %d\n", ocm->index);
+ seq_printf(m, "PhysAddr : 0x%llx\n", ocm->phys);
+ seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal);
+ seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal);
+ seq_printf(m, "MemTotal(C) : %d Bytes\n", ocm->c.memtotal);
+
+ seq_printf(m, "\n");
+
+ seq_printf(m, "NC.PhysAddr : 0x%llx\n", ocm->nc.phys);
+ seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt);
+ seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal);
+ seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree);
+
+ list_for_each_entry_safe(blk, tmp, &ocm->nc.list, list) {
+ seq_printf(m, "NC.MemUsed : %d Bytes (%s)\n",
+ blk->size, blk->owner);
+ }
+
+ seq_printf(m, "\n");
+
+ seq_printf(m, "C.PhysAddr : 0x%llx\n", ocm->c.phys);
+ seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt);
+ seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal);
+ seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree);
+
+ list_for_each_entry_safe(blk, tmp, &ocm->c.list, list) {
+ seq_printf(m, "C.MemUsed : %d Bytes (%s)\n",
+ blk->size, blk->owner);
+ }
+
+ seq_printf(m, "\n");
+ }
+
+ return 0;
+}
+
+static int ocm_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ocm_debugfs_show, NULL);
+}
+
+static const struct file_operations ocm_debugfs_fops = {
+ .open = ocm_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int ocm_debugfs_init(void)
+{
+ struct dentry *junk;
+
+ junk = debugfs_create_dir("ppc4xx_ocm", 0);
+ if (!junk) {
+ printk(KERN_ALERT "debugfs ppc4xx ocm: failed to create dir\n");
+ return -1;
+ }
+
+ if (debugfs_create_file("info", 0644, junk, NULL, &ocm_debugfs_fops)) {
+ printk(KERN_ALERT "debugfs ppc4xx ocm: failed to create file\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
+ int flags, const char *owner)
+{
+ void __iomem *addr = NULL;
+ unsigned long offset;
+ struct ocm_info *ocm;
+ struct ocm_region *ocm_reg;
+ struct ocm_block *ocm_blk;
+ int i;
+
+ for (i = 0; i < ocm_count; i++) {
+ ocm = ocm_get_node(i);
+
+ if (!ocm || !ocm->ready)
+ continue;
+
+ if (flags == PPC4XX_OCM_NON_CACHED)
+ ocm_reg = &ocm->nc;
+ else
+ ocm_reg = &ocm->c;
+
+ if (!ocm_reg->virt)
+ continue;
+
+ if (align < ocm->alignment)
+ align = ocm->alignment;
+
+ offset = rh_alloc_align(ocm_reg->rh, size, align, NULL);
+
+ if (IS_ERR_VALUE(offset))
+ continue;
+
+ ocm_blk = kzalloc(sizeof(struct ocm_block *), GFP_KERNEL);
+ if (!ocm_blk) {
+ printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block");
+ rh_free(ocm_reg->rh, offset);
+ break;
+ }
+
+ *phys = ocm_reg->phys + offset;
+ addr = ocm_reg->virt + offset;
+ size = ALIGN(size, align);
+
+ ocm_blk->addr = addr;
+ ocm_blk->size = size;
+ ocm_blk->owner = owner;
+ list_add_tail(&ocm_blk->list, &ocm_reg->list);
+
+ ocm_reg->memfree -= size;
+
+ break;
+ }
+
+ return addr;
+}
+
+void ppc4xx_ocm_free(const void *addr)
+{
+ int i;
+
+ if (!addr)
+ return;
+
+ for (i = 0; i < ocm_count; i++) {
+ struct ocm_info *ocm = ocm_get_node(i);
+
+ if (!ocm || !ocm->ready)
+ continue;
+
+ if (ocm_free_region(&ocm->nc, addr) ||
+ ocm_free_region(&ocm->c, addr))
+ return;
+ }
+}
+
+static int __init ppc4xx_ocm_init(void)
+{
+ struct device_node *np;
+ int count;
+
+ count = 0;
+ for_each_compatible_node(np, NULL, "ibm,ocm")
+ count++;
+
+ if (!count)
+ return 0;
+
+ ocm_nodes = kzalloc((count * sizeof(struct ocm_info)), GFP_KERNEL);
+ if (!ocm_nodes) {
+ printk(KERN_ERR "PPC4XX OCM: failed to allocate OCM nodes!\n");
+ return -ENOMEM;
+ }
+
+ ocm_count = count;
+ count = 0;
+
+ for_each_compatible_node(np, NULL, "ibm,ocm") {
+ ocm_init_node(count, np);
+ count++;
+ }
+
+ ocm_debugfs_init();
+
+ return 0;
+}
+
+arch_initcall(ppc4xx_ocm_init);
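The allocator added here is consumed by 4xx drivers roughly as follows; the owner string, size and alignment are illustrative:

/* Illustrative only */
static int sample_ocm_use(void)
{
	phys_addr_t phys;
	void *virt;

	virt = ppc4xx_ocm_alloc(&phys, 4096, 16, PPC4XX_OCM_NON_CACHED,
				"sample-drv");
	if (!virt)
		return -ENOMEM;	/* no OCM node ready, or region exhausted */

	/* 'virt' is the ioremap()ed CPU mapping, 'phys' the on-chip address */

	ppc4xx_ocm_free(virt);
	return 0;
}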
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 56e8b3c..64603a1 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -257,6 +257,7 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
+ resource_size_t offset = hose->mem_offset[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
@@ -270,7 +271,7 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
/* Configure the resource */
if (ppc4xx_setup_one_pci_PMM(hose, reg,
res->start,
- res->start - hose->pci_mem_offset,
+ res->start - offset,
resource_size(res),
res->flags,
j) == 0) {
@@ -279,7 +280,7 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
- if (res->start == hose->pci_mem_offset)
+ if (res->start == offset)
found_isa_hole = 1;
}
}
@@ -457,6 +458,7 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
+ resource_size_t offset = hose->mem_offset[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
@@ -470,7 +472,7 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
/* Configure the resource */
if (ppc4xx_setup_one_pcix_POM(hose, reg,
res->start,
- res->start - hose->pci_mem_offset,
+ res->start - offset,
resource_size(res),
res->flags,
j) == 0) {
@@ -479,7 +481,7 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
- if (res->start == hose->pci_mem_offset)
+ if (res->start == offset)
found_isa_hole = 1;
}
}
@@ -1792,6 +1794,7 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
+ resource_size_t offset = hose->mem_offset[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
@@ -1805,7 +1808,7 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
/* Configure the resource */
if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
res->start,
- res->start - hose->pci_mem_offset,
+ res->start - offset,
resource_size(res),
res->flags,
j) == 0) {
@@ -1814,7 +1817,7 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
- if (res->start == hose->pci_mem_offset)
+ if (res->start == offset)
found_isa_hole = 1;
}
}
diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c
index 9afba92..af79e1e 100644
--- a/arch/powerpc/sysdev/rtc_cmos_setup.c
+++ b/arch/powerpc/sysdev/rtc_cmos_setup.c
@@ -62,10 +62,7 @@ static int __init add_rtc(void)
pd = platform_device_register_simple("rtc_cmos", -1,
&res[0], num_res);
- if (IS_ERR(pd))
- return PTR_ERR(pd);
-
- return 0;
+ return PTR_RET(pd);
}
fs_initcall(add_rtc);
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
new file mode 100644
index 0000000..ce5a7b4
--- /dev/null
+++ b/arch/powerpc/sysdev/udbg_memcons.c
@@ -0,0 +1,105 @@
+/*
+ * A udbg backend which logs messages and reads input from in memory
+ * buffers.
+ *
+ * The console output can be read from memcons_output which is a
+ * circular buffer whose next write position is stored in memcons.output_pos.
+ *
+ * Input may be passed by writing into the memcons_input buffer when it is
+ * empty. The input buffer is empty when both input_pos == input_start and
+ * *input_start == '\0'.
+ *
+ * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp
+ * Copyright (C) 2013 Alistair Popple, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/barrier.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/udbg.h>
+
+struct memcons {
+ char *output_start;
+ char *output_pos;
+ char *output_end;
+ char *input_start;
+ char *input_pos;
+ char *input_end;
+};
+
+static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE];
+static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE];
+
+struct memcons memcons = {
+ .output_start = memcons_output,
+ .output_pos = memcons_output,
+ .output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE],
+ .input_start = memcons_input,
+ .input_pos = memcons_input,
+ .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE],
+};
+
+void memcons_putc(char c)
+{
+ char *new_output_pos;
+
+ *memcons.output_pos = c;
+ wmb();
+ new_output_pos = memcons.output_pos + 1;
+ if (new_output_pos >= memcons.output_end)
+ new_output_pos = memcons.output_start;
+
+ memcons.output_pos = new_output_pos;
+}
+
+int memcons_getc_poll(void)
+{
+ char c;
+ char *new_input_pos;
+
+ if (*memcons.input_pos) {
+ c = *memcons.input_pos;
+
+ new_input_pos = memcons.input_pos + 1;
+ if (new_input_pos >= memcons.input_end)
+ new_input_pos = memcons.input_start;
+ else if (*new_input_pos == '\0')
+ new_input_pos = memcons.input_start;
+
+ *memcons.input_pos = '\0';
+ wmb();
+ memcons.input_pos = new_input_pos;
+ return c;
+ }
+
+ return -1;
+}
+
+int memcons_getc(void)
+{
+ int c;
+
+ while (1) {
+ c = memcons_getc_poll();
+ if (c == -1)
+ cpu_relax();
+ else
+ break;
+ }
+
+ return c;
+}
+
+void udbg_init_memcons(void)
+{
+ udbg_putc = memcons_putc;
+ udbg_getc = memcons_getc;
+ udbg_getc_poll = memcons_getc_poll;
+}
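
The header comment above describes the memcons protocol: output goes into a circular buffer whose next write position is output_pos, and input may be written into memcons_input only when input_pos == input_start and *input_start == '\0'. As a purely illustrative sketch (not part of the patch), the user-space C program below shows how an external reader that already has the memcons struct and its buffers visible in its own address space could drain the output ring and queue input under those rules; how that view is obtained (crash dump, shared mapping, debugger) is outside the scope of this file, and memcons_view, drain_output and inject_input are hypothetical helpers invented for the example.

#include <stdio.h>
#include <string.h>

/* Mirror of the memcons layout introduced above; illustration only. */
struct memcons_view {
	char *output_start;
	char *output_pos;	/* kernel's next write position in the output ring */
	char *output_end;
	char *input_start;
	char *input_pos;	/* kernel's next read position in the input buffer */
	char *input_end;
};

/*
 * Print everything between our last read position and output_pos,
 * wrapping at output_end exactly as memcons_putc() wraps when writing.
 * Returns the new read position.
 */
static char *drain_output(const struct memcons_view *mc, char *last_read)
{
	while (last_read != mc->output_pos) {
		putchar(*last_read);
		if (++last_read >= mc->output_end)
			last_read = mc->output_start;
	}
	return last_read;
}

/*
 * Queue a string of input, but only when the buffer is empty according to
 * the rule above: input_pos == input_start and *input_start == '\0'.
 * Ordering/concurrency against the kernel reader is ignored in this sketch.
 */
static int inject_input(struct memcons_view *mc, const char *s)
{
	size_t len = strlen(s);

	if (mc->input_pos != mc->input_start || *mc->input_start != '\0')
		return -1;			/* previous input not yet consumed */
	if (len + 1 > (size_t)(mc->input_end - mc->input_start))
		return -1;			/* does not fit, including the '\0' */

	memcpy(mc->input_start, s, len + 1);	/* trailing '\0' marks end of input */
	return 0;
}

int main(void)
{
	static char out[64] = "hello from memcons";
	static char in[16] = "";
	struct memcons_view mc = {
		.output_start = out,
		.output_pos   = out + strlen(out),	/* pretend the kernel wrote this much */
		.output_end   = out + sizeof(out),
		.input_start  = in,
		.input_pos    = in,
		.input_end    = in + sizeof(in),
	};

	drain_output(&mc, mc.output_start);
	putchar('\n');

	if (inject_input(&mc, "help\r") == 0)
		printf("input queued\n");
	return 0;
}
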
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 48861d3..7cd728b 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -51,6 +51,12 @@ static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];
static inline unsigned int icp_native_get_xirr(void)
{
int cpu = smp_processor_id();
+ unsigned int xirr;
+
+ /* Handle an interrupt latched by KVM, if any */
+ xirr = kvmppc_get_xics_latch();
+ if (xirr)
+ return xirr;
return in_be32(&icp_native_regs[cpu]->xirr.word);
}
@@ -81,7 +87,7 @@ static void icp_native_set_cpu_priority(unsigned char cppr)
iosync();
}
-static void icp_native_eoi(struct irq_data *d)
+void icp_native_eoi(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
@@ -138,6 +144,7 @@ static unsigned int icp_native_get_irq(void)
static void icp_native_cause_ipi(int cpu, unsigned long data)
{
+ kvmppc_set_host_ipi(cpu, 1);
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
@@ -151,6 +158,7 @@ static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
+ kvmppc_set_host_ipi(cpu, 0);
icp_native_set_qirr(cpu, 0xff);
return smp_ipi_demux();
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index f7e8609..39d7221 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -148,7 +148,7 @@ static int ics_opal_set_affinity(struct irq_data *d,
__func__, d->irq, hw_irq, server, rc);
return -1;
}
- return 0;
+ return IRQ_SET_MASK_OK;
}
static struct irq_chip ics_opal_irq_chip = {
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
index c782f85..936575d 100644
--- a/arch/powerpc/sysdev/xics/ics-rtas.c
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -213,7 +213,7 @@ static int ics_rtas_host_match(struct ics *ics, struct device_node *node)
return !of_device_is_compatible(node, "chrp,iic");
}
-int ics_rtas_init(void)
+__init int ics_rtas_init(void)
{
ibm_get_xive = rtas_token("ibm,get-xive");
ibm_set_xive = rtas_token("ibm,set-xive");
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 1f8d2f1..96bf5bd 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -43,6 +43,7 @@
#include <asm/setjmp.h>
#include <asm/reg.h>
#include <asm/debug.h>
+#include <asm/hw_breakpoint.h>
#ifdef CONFIG_PPC64
#include <asm/hvcall.h>
@@ -607,7 +608,7 @@ static int xmon_sstep(struct pt_regs *regs)
return 1;
}
-static int xmon_dabr_match(struct pt_regs *regs)
+static int xmon_break_match(struct pt_regs *regs)
{
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
return 0;
@@ -740,8 +741,14 @@ static void insert_bpts(void)
static void insert_cpu_bpts(void)
{
- if (dabr.enabled)
- set_dabr(dabr.address | (dabr.enabled & 7), DABRX_ALL);
+ struct arch_hw_breakpoint brk;
+
+ if (dabr.enabled) {
+ brk.address = dabr.address;
+ brk.type = (dabr.enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
+ brk.len = 8;
+ set_breakpoint(&brk);
+ }
if (iabr && cpu_has_feature(CPU_FTR_IABR))
mtspr(SPRN_IABR, iabr->address
| (iabr->enabled & (BP_IABR|BP_IABR_TE)));
@@ -769,7 +776,7 @@ static void remove_bpts(void)
static void remove_cpu_bpts(void)
{
- set_dabr(0, 0);
+ hw_breakpoint_disable();
if (cpu_has_feature(CPU_FTR_IABR))
mtspr(SPRN_IABR, 0);
}
@@ -1138,7 +1145,7 @@ bpt_cmds(void)
printf(badaddr);
break;
}
- dabr.address &= ~7;
+ dabr.address &= ~HW_BRK_TYPE_DABR;
dabr.enabled = mode | BP_DABR;
}
break;
@@ -1423,7 +1430,7 @@ static void excprint(struct pt_regs *fp)
printf(" sp: %lx\n", fp->gpr[1]);
printf(" msr: %lx\n", fp->msr);
- if (trap == 0x300 || trap == 0x380 || trap == 0x600) {
+ if (trap == 0x300 || trap == 0x380 || trap == 0x600 || trap == 0x200) {
printf(" dar: %lx\n", fp->dar);
if (trap != 0x380)
printf(" dsisr: %lx\n", fp->dsisr);
@@ -2917,7 +2924,7 @@ static void xmon_init(int enable)
__debugger_bpt = xmon_bpt;
__debugger_sstep = xmon_sstep;
__debugger_iabr_match = xmon_iabr_match;
- __debugger_dabr_match = xmon_dabr_match;
+ __debugger_break_match = xmon_break_match;
__debugger_fault_handler = xmon_fault_handler;
} else {
__debugger = NULL;
@@ -2925,7 +2932,7 @@ static void xmon_init(int enable)
__debugger_bpt = NULL;
__debugger_sstep = NULL;
__debugger_iabr_match = NULL;
- __debugger_dabr_match = NULL;
+ __debugger_break_match = NULL;
__debugger_fault_handler = NULL;
}
}
@@ -2940,7 +2947,7 @@ static void sysrq_handle_xmon(int key)
static struct sysrq_key_op sysrq_xmon_op = {
.handler = sysrq_handle_xmon,
- .help_msg = "Xmon",
+ .help_msg = "xmon(x)",
.action_msg = "Entering xmon",
};