author    Scott Wood <scottwood@freescale.com>  2014-05-14 18:19:12 (GMT)
committer Scott Wood <scottwood@freescale.com>  2014-05-14 18:37:18 (GMT)
commit    86ba38e6f5f2fbfe9b49e153ea89593b26482019 (patch)
tree      f99d2906b0eafca507f37289e68052fc105cc2dc
parent    07c8b57b111585a617b2b456497fc9b33c00743c (diff)
download  linux-fsl-qoriq-86ba38e6f5f2fbfe9b49e153ea89593b26482019.tar.xz
Reset to 3.12.19
-rw-r--r-- Documentation/devicetree/bindings/net/micrel-ks8851.txt | 1
-rw-r--r-- Documentation/hwlat_detector.txt | 64
-rw-r--r-- Documentation/i2c/busses/i2c-i801 | 2
-rw-r--r-- Documentation/sysrq.txt | 11
-rw-r--r-- Documentation/trace/histograms.txt | 186
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/Kconfig | 1
-rw-r--r-- arch/alpha/mm/fault.c | 2
-rw-r--r-- arch/arc/boot/dts/nsimosci.dts | 12
-rw-r--r-- arch/arc/configs/nsimosci_defconfig | 1
-rw-r--r-- arch/arm/Kconfig | 1
-rw-r--r-- arch/arm/include/asm/pgtable-3level.h | 13
-rw-r--r-- arch/arm/include/asm/switch_to.h | 8
-rw-r--r-- arch/arm/include/asm/thread_info.h | 3
-rw-r--r-- arch/arm/kernel/asm-offsets.c | 1
-rw-r--r-- arch/arm/kernel/entry-armv.S | 13
-rw-r--r-- arch/arm/kernel/process.c | 24
-rw-r--r-- arch/arm/kernel/setup.c | 30
-rw-r--r-- arch/arm/kernel/signal.c | 3
-rw-r--r-- arch/arm/kernel/unwind.c | 14
-rw-r--r-- arch/arm/mach-at91/at91rm9200_time.c | 1
-rw-r--r-- arch/arm/mach-at91/at91sam926x_time.c | 5
-rw-r--r-- arch/arm/mach-exynos/platsmp.c | 12
-rw-r--r-- arch/arm/mach-msm/platsmp.c | 10
-rw-r--r-- arch/arm/mach-omap2/omap-smp.c | 10
-rw-r--r-- arch/arm/mach-prima2/platsmp.c | 10
-rw-r--r-- arch/arm/mach-spear/platsmp.c | 10
-rw-r--r-- arch/arm/mach-sti/platsmp.c | 10
-rw-r--r-- arch/arm/mach-tegra/common.c | 11
-rw-r--r-- arch/arm/mach-ux500/platsmp.c | 10
-rw-r--r-- arch/arm/mm/fault.c | 2
-rw-r--r-- arch/arm/mm/highmem.c | 43
-rw-r--r-- arch/arm/mm/mm.h | 1
-rw-r--r-- arch/arm/mm/mmap.c | 6
-rw-r--r-- arch/arm/mm/mmu.c | 7
-rw-r--r-- arch/arm/plat-versatile/platsmp.c | 10
-rw-r--r-- arch/arm64/boot/dts/foundation-v8.dts | 2
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 2
-rw-r--r-- arch/arm64/include/asm/syscall.h | 6
-rw-r--r-- arch/avr32/mm/fault.c | 2
-rw-r--r-- arch/cris/mm/fault.c | 2
-rw-r--r-- arch/frv/mm/fault.c | 2
-rw-r--r-- arch/ia64/mm/fault.c | 2
-rw-r--r-- arch/m32r/mm/fault.c | 2
-rw-r--r-- arch/m68k/Kconfig | 1
-rw-r--r-- arch/m68k/mm/fault.c | 2
-rw-r--r-- arch/microblaze/mm/fault.c | 2
-rw-r--r-- arch/mips/Kconfig | 2
-rw-r--r-- arch/mips/kernel/signal.c | 1
-rw-r--r-- arch/mips/mm/fault.c | 2
-rw-r--r-- arch/mn10300/mm/fault.c | 2
-rw-r--r-- arch/parisc/kernel/syscall_table.S | 2
-rw-r--r-- arch/parisc/mm/fault.c | 2
-rw-r--r-- arch/powerpc/Kconfig | 6
-rw-r--r-- arch/powerpc/boot/util.S | 14
-rw-r--r-- arch/powerpc/include/asm/eeh.h | 10
-rw-r--r-- arch/powerpc/include/asm/ppc_asm.h | 2
-rw-r--r-- arch/powerpc/include/asm/reg.h | 7
-rw-r--r-- arch/powerpc/include/asm/thread_info.h | 12
-rw-r--r-- arch/powerpc/include/asm/timex.h | 8
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r-- arch/powerpc/kernel/eeh_driver.c | 150
-rw-r--r-- arch/powerpc/kernel/entry_32.S | 17
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 14
-rw-r--r-- arch/powerpc/kernel/irq.c | 2
-rw-r--r-- arch/powerpc/kernel/misc_32.S | 2
-rw-r--r-- arch/powerpc/kernel/misc_64.S | 2
-rw-r--r-- arch/powerpc/kernel/time.c | 2
-rw-r--r-- arch/powerpc/kernel/vdso32/gettimeofday.S | 6
-rw-r--r-- arch/powerpc/mm/fault.c | 2
-rw-r--r-- arch/powerpc/platforms/52xx/mpc52xx_pic.c | 5
-rw-r--r-- arch/powerpc/platforms/powernv/eeh-ioda.c | 39
-rw-r--r-- arch/s390/Kconfig | 1
-rw-r--r-- arch/s390/kernel/vdso32/clock_gettime.S | 9
-rw-r--r-- arch/s390/kernel/vdso64/clock_gettime.S | 6
-rw-r--r-- arch/s390/mm/fault.c | 6
-rw-r--r-- arch/score/mm/fault.c | 2
-rw-r--r-- arch/sh/kernel/irq.c | 2
-rw-r--r-- arch/sh/mm/fault.c | 2
-rw-r--r-- arch/sparc/Kconfig | 13
-rw-r--r-- arch/sparc/include/asm/mmu_64.h | 2
-rw-r--r-- arch/sparc/include/asm/mmu_context_64.h | 10
-rw-r--r-- arch/sparc/kernel/irq_64.c | 2
-rw-r--r-- arch/sparc/kernel/pcr.c | 2
-rw-r--r-- arch/sparc/kernel/process_64.c | 4
-rw-r--r-- arch/sparc/kernel/setup_32.c | 1
-rw-r--r-- arch/sparc/kernel/setup_64.c | 8
-rw-r--r-- arch/sparc/kernel/smp_64.c | 4
-rw-r--r-- arch/sparc/kernel/syscalls.S | 4
-rw-r--r-- arch/sparc/mm/fault_32.c | 2
-rw-r--r-- arch/sparc/mm/fault_64.c | 2
-rw-r--r-- arch/sparc/mm/init_64.c | 14
-rw-r--r-- arch/sparc/mm/tsb.c | 20
-rw-r--r-- arch/tile/mm/fault.c | 2
-rw-r--r-- arch/um/kernel/trap.c | 2
-rw-r--r-- arch/x86/Kconfig | 10
-rw-r--r-- arch/x86/crypto/aesni-intel_glue.c | 24
-rw-r--r-- arch/x86/crypto/cast5_avx_glue.c | 21
-rw-r--r-- arch/x86/crypto/ghash-clmulni-intel_asm.S | 29
-rw-r--r-- arch/x86/crypto/ghash-clmulni-intel_glue.c | 14
-rw-r--r-- arch/x86/crypto/glue_helper.c | 31
-rw-r--r-- arch/x86/crypto/sha256_ssse3_glue.c | 2
-rw-r--r-- arch/x86/include/asm/pgtable.h | 14
-rw-r--r-- arch/x86/include/asm/signal.h | 13
-rw-r--r-- arch/x86/include/asm/stackprotector.h | 10
-rw-r--r-- arch/x86/include/asm/thread_info.h | 6
-rw-r--r-- arch/x86/include/asm/topology.h | 3
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 3
-rw-r--r-- arch/x86/kernel/asm-offsets.c | 1
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 131
-rw-r--r-- arch/x86/kernel/early-quirks.c | 15
-rw-r--r-- arch/x86/kernel/entry_32.S | 18
-rw-r--r-- arch/x86/kernel/entry_64.S | 26
-rw-r--r-- arch/x86/kernel/irq_32.c | 2
-rw-r--r-- arch/x86/kernel/irq_64.c | 3
-rw-r--r-- arch/x86/kernel/irq_work.c | 2
-rw-r--r-- arch/x86/kernel/process_32.c | 32
-rw-r--r-- arch/x86/kernel/quirks.c | 37
-rw-r--r-- arch/x86/kernel/signal.c | 8
-rw-r--r-- arch/x86/kernel/traps.c | 32
-rw-r--r-- arch/x86/kvm/mmu.c | 12
-rw-r--r-- arch/x86/kvm/paging_tmpl.h | 8
-rw-r--r-- arch/x86/kvm/vmx.c | 2
-rw-r--r-- arch/x86/kvm/x86.c | 7
-rw-r--r-- arch/x86/mm/fault.c | 2
-rw-r--r-- arch/x86/mm/highmem_32.c | 9
-rw-r--r-- arch/x86/mm/iomap_32.c | 11
-rw-r--r-- arch/x86/mm/srat.c | 16
-rw-r--r-- arch/x86/net/bpf_jit.S | 2
-rw-r--r-- arch/x86/xen/mmu.c | 4
-rw-r--r-- arch/xtensa/mm/fault.c | 2
-rw-r--r-- block/blk-core.c | 14
-rw-r--r-- block/blk-ioc.c | 5
-rw-r--r-- block/blk-iopoll.c | 3
-rw-r--r-- block/blk-softirq.c | 3
-rw-r--r-- crypto/algapi.c | 4
-rw-r--r-- crypto/api.c | 6
-rw-r--r-- crypto/asymmetric_keys/x509_parser.h | 1
-rw-r--r-- crypto/internal.h | 4
-rw-r--r-- drivers/acpi/acpica/acglobal.h | 2
-rw-r--r-- drivers/acpi/acpica/hwregs.c | 4
-rw-r--r-- drivers/acpi/acpica/hwxface.c | 4
-rw-r--r-- drivers/acpi/acpica/utmutex.c | 4
-rw-r--r-- drivers/acpi/button.c | 4
-rw-r--r-- drivers/acpi/ec.c | 21
-rw-r--r-- drivers/acpi/sleep.c | 7
-rw-r--r-- drivers/ata/libata-sff.c | 12
-rw-r--r-- drivers/block/aoe/aoecmd.c | 4
-rw-r--r-- drivers/block/floppy.c | 36
-rw-r--r-- drivers/bluetooth/btusb.c | 1
-rw-r--r-- drivers/char/ipmi/ipmi_bt_sm.c | 2
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 46
-rw-r--r-- drivers/char/mem.c | 6
-rw-r--r-- drivers/char/random.c | 19
-rw-r--r-- drivers/clocksource/tcb_clksrc.c | 38
-rw-r--r-- drivers/clocksource/vf_pit_timer.c | 2
-rw-r--r-- drivers/cpufreq/powernow-k6.c | 147
-rw-r--r-- drivers/edac/amd64_edac.c | 14
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_fbdev.c | 3
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_mode.c | 11
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 13
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 11
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_acpi.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/dce6_afmt.c | 13
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_hdmi.c | 26
-rw-r--r-- drivers/gpu/drm/radeon/r600_audio.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/r600_hdmi.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 6
-rw-r--r-- drivers/hid/hid-lg4ff.c | 22
-rw-r--r-- drivers/hid/hidraw.c | 4
-rw-r--r-- drivers/i2c/busses/Kconfig | 2
-rw-r--r-- drivers/i2c/busses/i2c-cpm.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-omap.c | 5
-rw-r--r-- drivers/ide/alim15x3.c | 4
-rw-r--r-- drivers/ide/hpt366.c | 4
-rw-r--r-- drivers/ide/ide-io-std.c | 8
-rw-r--r-- drivers/ide/ide-io.c | 2
-rw-r--r-- drivers/ide/ide-iops.c | 4
-rw-r--r-- drivers/ide/ide-probe.c | 4
-rw-r--r-- drivers/ide/ide-taskfile.c | 6
-rw-r--r-- drivers/idle/intel_idle.c | 24
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 4
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.c | 1
-rw-r--r-- drivers/input/gameport/gameport.c | 8
-rw-r--r-- drivers/input/mouse/cypress_ps2.c | 1
-rw-r--r-- drivers/input/mouse/elantech.c | 45
-rw-r--r-- drivers/input/mouse/synaptics.c | 55
-rw-r--r-- drivers/input/mousedev.c | 73
-rw-r--r-- drivers/input/tablet/wacom_sys.c | 17
-rw-r--r-- drivers/input/tablet/wacom_wac.c | 61
-rw-r--r-- drivers/input/tablet/wacom_wac.h | 3
-rw-r--r-- drivers/iommu/amd_iommu.c | 4
-rw-r--r-- drivers/isdn/isdnloop/isdnloop.c | 23
-rw-r--r-- drivers/leds/trigger/Kconfig | 2
-rw-r--r-- drivers/md/bcache/Kconfig | 1
-rw-r--r-- drivers/md/dm.c | 4
-rw-r--r-- drivers/md/raid5.c | 7
-rw-r--r-- drivers/md/raid5.h | 1
-rw-r--r-- drivers/media/pci/cx18/cx18-driver.c | 5
-rw-r--r-- drivers/media/usb/dvb-usb/cxusb.c | 21
-rw-r--r-- drivers/media/usb/dvb-usb/dw2102.c | 52
-rw-r--r-- drivers/misc/Kconfig | 42
-rw-r--r-- drivers/misc/Makefile | 1
-rw-r--r-- drivers/misc/hwlat_detector.c | 1240
-rw-r--r-- drivers/mmc/host/mmci.c | 5
-rw-r--r-- drivers/net/Kconfig | 1
-rw-r--r-- drivers/net/bonding/bond_alb.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c59x.c | 8
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 37
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.h | 5
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/sge.c | 3
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 14
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 35
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_sysfs.c | 24
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 25
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.h | 1
-rw-r--r-- drivers/net/ethernet/intel/e100.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 20
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 43
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.c | 30
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 7
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6
-rw-r--r-- drivers/net/ethernet/realtek/8139too.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 3
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 9
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 25
-rw-r--r-- drivers/net/rionet.c | 6
-rw-r--r-- drivers/net/usb/usbnet.c | 33
-rw-r--r-- drivers/net/vxlan.c | 136
-rw-r--r-- drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 14
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/bt-coex.c | 7
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/trans.c | 19
-rw-r--r-- drivers/net/wireless/orinoco/orinoco_usb.c | 2
-rw-r--r-- drivers/net/wireless/p54/txrx.c | 2
-rw-r--r-- drivers/net/xen-netback/common.h | 5
-rw-r--r-- drivers/net/xen-netback/interface.c | 11
-rw-r--r-- drivers/net/xen-netback/netback.c | 20
-rw-r--r-- drivers/pci/access.c | 2
-rw-r--r-- drivers/pci/host/pci-mvebu.c | 25
-rw-r--r-- drivers/pci/host/pcie-designware.c | 8
-rw-r--r-- drivers/regulator/core.c | 34
-rw-r--r-- drivers/s390/block/dasd.c | 8
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 18
-rw-r--r-- drivers/scsi/fcoe/fcoe_ctlr.c | 4
-rw-r--r-- drivers/scsi/libfc/fc_exch.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_inline.h | 4
-rw-r--r-- drivers/staging/comedi/drivers/8255_pci.c | 34
-rw-r--r-- drivers/tty/ipwireless/tty.c | 3
-rw-r--r-- drivers/tty/serial/8250/8250_core.c | 27
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 15
-rw-r--r-- drivers/tty/serial/omap-serial.c | 12
-rw-r--r-- drivers/tty/serial/sunhv.c | 22
-rw-r--r-- drivers/tty/serial/sunsab.c | 14
-rw-r--r-- drivers/tty/serial/sunsu.c | 14
-rw-r--r-- drivers/tty/serial/sunzilog.c | 14
-rw-r--r-- drivers/tty/tty_buffer.c | 24
-rw-r--r-- drivers/tty/tty_io.c | 23
-rw-r--r-- drivers/usb/atm/usbatm.h | 14
-rw-r--r-- drivers/usb/core/hcd.c | 8
-rw-r--r-- drivers/usb/gadget/f_fs.c | 2
-rw-r--r-- drivers/usb/gadget/inode.c | 4
-rw-r--r-- drivers/usb/gadget/u_serial.c | 4
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 10
-rw-r--r-- drivers/usb/host/xhci-pci.c | 5
-rw-r--r-- drivers/vfio/vfio_iommu_type1.c | 4
-rw-r--r-- drivers/vhost/net.c | 20
-rw-r--r-- drivers/video/fbmem.c | 87
-rw-r--r-- drivers/video/fbsysfs.c | 19
-rw-r--r-- drivers/video/sh_mobile_lcdcfb.c | 10
-rw-r--r-- drivers/virtio/virtio_balloon.c | 6
-rw-r--r-- drivers/xen/balloon.c | 24
-rw-r--r-- fs/autofs4/autofs_i.h | 1
-rw-r--r-- fs/autofs4/expire.c | 2
-rw-r--r-- fs/btrfs/disk-io.c | 4
-rw-r--r-- fs/btrfs/transaction.c | 14
-rw-r--r-- fs/buffer.c | 21
-rw-r--r-- fs/dcache.c | 19
-rw-r--r-- fs/dlm/lowcomms.c | 8
-rw-r--r-- fs/eventpoll.c | 4
-rw-r--r-- fs/exec.c | 2
-rw-r--r-- fs/ext4/extents.c | 40
-rw-r--r-- fs/ext4/inode.c | 28
-rw-r--r-- fs/fs-writeback.c | 31
-rw-r--r-- fs/fscache/object-list.c | 5
-rw-r--r-- fs/fscache/object.c | 3
-rw-r--r-- fs/jbd/checkpoint.c | 2
-rw-r--r-- fs/jbd2/checkpoint.c | 2
-rw-r--r-- fs/jffs2/compr_rtime.c | 4
-rw-r--r-- fs/jffs2/nodelist.h | 2
-rw-r--r-- fs/jffs2/nodemgmt.c | 14
-rw-r--r-- fs/namespace.c | 8
-rw-r--r-- fs/nfs/dir.c | 21
-rw-r--r-- fs/nfs/direct.c | 38
-rw-r--r-- fs/nfs/inode.c | 51
-rw-r--r-- fs/nfs/nfs3acl.c | 15
-rw-r--r-- fs/nfs/nfs4_fs.h | 1
-rw-r--r-- fs/nfs/nfs4filelayout.c | 18
-rw-r--r-- fs/nfs/nfs4filelayoutdev.c | 2
-rw-r--r-- fs/nfs/nfs4proc.c | 22
-rw-r--r-- fs/nfs/nfs4xdr.c | 2
-rw-r--r-- fs/nfs/nfstrace.h | 1
-rw-r--r-- fs/nfs/write.c | 7
-rw-r--r-- fs/ntfs/aops.c | 14
-rw-r--r-- fs/proc/page.c | 2
-rw-r--r-- fs/proc/proc_devtree.c | 1
-rw-r--r-- fs/timerfd.c | 5
-rw-r--r-- fs/xfs/xfs_da_btree.c | 2
-rw-r--r-- include/acpi/platform/aclinux.h | 14
-rw-r--r-- include/asm-generic/bug.h | 14
-rw-r--r-- include/drm/drm_crtc.h | 2
-rw-r--r-- include/linux/backing-dev.h | 2
-rw-r--r-- include/linux/bitops.h | 15
-rw-r--r-- include/linux/buffer_head.h | 44
-rw-r--r-- include/linux/ceph/messenger.h | 2
-rw-r--r-- include/linux/ceph/osd_client.h | 1
-rw-r--r-- include/linux/cgroup.h | 2
-rw-r--r-- include/linux/completion.h | 8
-rw-r--r-- include/linux/cpu.h | 4
-rw-r--r-- include/linux/delay.h | 6
-rw-r--r-- include/linux/fb.h | 4
-rw-r--r-- include/linux/ftrace_event.h | 7
-rw-r--r-- include/linux/futex.h | 4
-rw-r--r-- include/linux/highmem.h | 28
-rw-r--r-- include/linux/hrtimer.h | 19
-rw-r--r-- include/linux/huge_mm.h | 18
-rw-r--r-- include/linux/idr.h | 4
-rw-r--r-- include/linux/init_task.h | 10
-rw-r--r-- include/linux/interrupt.h | 64
-rw-r--r-- include/linux/irq.h | 5
-rw-r--r-- include/linux/irq_work.h | 1
-rw-r--r-- include/linux/irqdesc.h | 1
-rw-r--r-- include/linux/irqflags.h | 29
-rw-r--r-- include/linux/jbd_common.h | 24
-rw-r--r-- include/linux/jump_label.h | 3
-rw-r--r-- include/linux/kdb.h | 3
-rw-r--r-- include/linux/kernel.h | 1
-rw-r--r-- include/linux/lglock.h | 19
-rw-r--r-- include/linux/list.h | 11
-rw-r--r-- include/linux/list_bl.h | 28
-rw-r--r-- include/linux/locallock.h | 270
-rw-r--r-- include/linux/mm.h | 60
-rw-r--r-- include/linux/mm_types.h | 8
-rw-r--r-- include/linux/mutex.h | 20
-rw-r--r-- include/linux/mutex_rt.h | 84
-rw-r--r-- include/linux/netdevice.h | 3
-rw-r--r-- include/linux/netfilter/x_tables.h | 7
-rw-r--r-- include/linux/nfs_fs.h | 1
-rw-r--r-- include/linux/nfs_xdr.h | 5
-rw-r--r-- include/linux/notifier.h | 34
-rw-r--r-- include/linux/page_cgroup.h | 15
-rw-r--r-- include/linux/percpu.h | 25
-rw-r--r-- include/linux/pid.h | 1
-rw-r--r-- include/linux/preempt.h | 70
-rw-r--r-- include/linux/preempt_mask.h | 15
-rw-r--r-- include/linux/printk.h | 19
-rw-r--r-- include/linux/radix-tree.h | 7
-rw-r--r-- include/linux/random.h | 2
-rw-r--r-- include/linux/rcupdate.h | 26
-rw-r--r-- include/linux/rcutree.h | 18
-rw-r--r-- include/linux/rtmutex.h | 42
-rw-r--r-- include/linux/rwlock_rt.h | 99
-rw-r--r-- include/linux/rwlock_types.h | 7
-rw-r--r-- include/linux/rwlock_types_rt.h | 33
-rw-r--r-- include/linux/rwsem.h | 6
-rw-r--r-- include/linux/rwsem_rt.h | 128
-rw-r--r-- include/linux/sched.h | 207
-rw-r--r-- include/linux/sched/rt.h | 5
-rw-r--r-- include/linux/seqlock.h | 54
-rw-r--r-- include/linux/signal.h | 1
-rw-r--r-- include/linux/skbuff.h | 7
-rw-r--r-- include/linux/smp.h | 3
-rw-r--r-- include/linux/spinlock.h | 12
-rw-r--r-- include/linux/spinlock_api_smp.h | 4
-rw-r--r-- include/linux/spinlock_rt.h | 166
-rw-r--r-- include/linux/spinlock_types.h | 79
-rw-r--r-- include/linux/spinlock_types_nort.h | 33
-rw-r--r-- include/linux/spinlock_types_raw.h | 56
-rw-r--r-- include/linux/spinlock_types_rt.h | 51
-rw-r--r-- include/linux/srcu.h | 9
-rw-r--r-- include/linux/sysctl.h | 1
-rw-r--r-- include/linux/timer.h | 2
-rw-r--r-- include/linux/tty.h | 2
-rw-r--r-- include/linux/uaccess.h | 41
-rw-r--r-- include/linux/uprobes.h | 1
-rw-r--r-- include/linux/usb/usbnet.h | 2
-rw-r--r-- include/linux/vmstat.h | 4
-rw-r--r-- include/linux/wait-simple.h | 207
-rw-r--r-- include/linux/wait.h | 1
-rw-r--r-- include/net/dst.h | 2
-rw-r--r-- include/net/neighbour.h | 4
-rw-r--r-- include/net/netns/ipv4.h | 1
-rw-r--r-- include/net/sock.h | 5
-rw-r--r-- include/net/tcp.h | 11
-rw-r--r-- include/trace/events/hist.h | 72
-rw-r--r-- include/trace/events/latency_hist.h | 29
-rw-r--r-- include/trace/ftrace.h | 7
-rw-r--r-- include/uapi/linux/fd.h | 3
-rw-r--r-- init/Kconfig | 14
-rw-r--r-- init/Makefile | 2
-rw-r--r-- init/main.c | 4
-rw-r--r-- ipc/mqueue.c | 24
-rw-r--r-- ipc/msg.c | 16
-rw-r--r-- ipc/sem.c | 10
-rw-r--r-- kernel/Kconfig.locks | 2
-rw-r--r-- kernel/Kconfig.preempt | 33
-rw-r--r-- kernel/Makefile | 11
-rw-r--r-- kernel/cgroup.c | 26
-rw-r--r-- kernel/cpu.c | 328
-rw-r--r-- kernel/debug/kdb/kdb_io.c | 6
-rw-r--r-- kernel/events/core.c | 1
-rw-r--r-- kernel/exit.c | 23
-rw-r--r-- kernel/fork.c | 42
-rw-r--r-- kernel/futex.c | 96
-rw-r--r-- kernel/hrtimer.c | 370
-rw-r--r-- kernel/irq/handle.c | 8
-rw-r--r-- kernel/irq/manage.c | 100
-rw-r--r-- kernel/irq/settings.h | 12
-rw-r--r-- kernel/irq/spurious.c | 8
-rw-r--r-- kernel/irq_work.c | 27
-rw-r--r-- kernel/itimer.c | 1
-rw-r--r-- kernel/ksysfs.c | 12
-rw-r--r-- kernel/lglock.c | 54
-rw-r--r-- kernel/lockdep.c | 2
-rw-r--r-- kernel/panic.c | 2
-rw-r--r-- kernel/pid_namespace.c | 4
-rw-r--r-- kernel/posix-cpu-timers.c | 198
-rw-r--r-- kernel/posix-timers.c | 37
-rw-r--r-- kernel/power/hibernate.c | 7
-rw-r--r-- kernel/power/suspend.c | 4
-rw-r--r-- kernel/printk/printk.c | 148
-rw-r--r-- kernel/ptrace.c | 7
-rw-r--r-- kernel/rcupdate.c | 2
-rw-r--r-- kernel/rcutiny.c | 2
-rw-r--r-- kernel/rcutree.c | 152
-rw-r--r-- kernel/rcutree.h | 11
-rw-r--r-- kernel/rcutree_plugin.h | 178
-rw-r--r-- kernel/relay.c | 14
-rw-r--r-- kernel/res_counter.c | 8
-rw-r--r-- kernel/rt.c | 452
-rw-r--r-- kernel/rtmutex.c | 743
-rw-r--r-- kernel/rtmutex_common.h | 12
-rw-r--r-- kernel/sched/core.c | 569
-rw-r--r-- kernel/sched/cputime.c | 62
-rw-r--r-- kernel/sched/debug.c | 7
-rw-r--r-- kernel/sched/fair.c | 16
-rw-r--r-- kernel/sched/features.h | 7
-rw-r--r-- kernel/sched/rt.c | 1
-rw-r--r-- kernel/sched/sched.h | 10
-rw-r--r-- kernel/signal.c | 135
-rw-r--r-- kernel/softirq.c | 742
-rw-r--r-- kernel/spinlock.c | 7
-rw-r--r-- kernel/stop_machine.c | 93
-rw-r--r-- kernel/time/jiffies.c | 7
-rw-r--r-- kernel/time/ntp.c | 40
-rw-r--r-- kernel/time/tick-common.c | 10
-rw-r--r-- kernel/time/tick-internal.h | 3
-rw-r--r-- kernel/time/tick-sched.c | 27
-rw-r--r-- kernel/time/timekeeping.c | 6
-rw-r--r-- kernel/timer.c | 141
-rw-r--r-- kernel/trace/Kconfig | 104
-rw-r--r-- kernel/trace/Makefile | 4
-rw-r--r-- kernel/trace/latency_hist.c | 1178
-rw-r--r-- kernel/trace/trace.c | 44
-rw-r--r-- kernel/trace/trace.h | 2
-rw-r--r-- kernel/trace/trace_events.c | 8
-rw-r--r-- kernel/trace/trace_export.c | 7
-rw-r--r-- kernel/trace/trace_irqsoff.c | 11
-rw-r--r-- kernel/trace/trace_output.c | 18
-rw-r--r-- kernel/user.c | 4
-rw-r--r-- kernel/user_namespace.c | 11
-rw-r--r-- kernel/wait-simple.c | 115
-rw-r--r-- kernel/watchdog.c | 16
-rw-r--r-- kernel/workqueue.c | 174
-rw-r--r-- kernel/workqueue_internal.h | 5
-rw-r--r-- lib/Kconfig | 2
-rw-r--r-- lib/Kconfig.debug | 2
-rw-r--r-- lib/Makefile | 3
-rw-r--r-- lib/debugobjects.c | 5
-rw-r--r-- lib/idr.c | 37
-rw-r--r-- lib/locking-selftest.c | 23
-rw-r--r-- lib/nlattr.c | 10
-rw-r--r-- lib/percpu-rwsem.c | 4
-rw-r--r-- lib/radix-tree.c | 5
-rw-r--r-- lib/scatterlist.c | 6
-rw-r--r-- lib/smp_processor_id.c | 6
-rw-r--r-- lib/spinlock_debug.c | 5
-rw-r--r-- localversion-rt | 1
-rw-r--r-- mm/Kconfig | 2
-rw-r--r-- mm/backing-dev.c | 16
-rw-r--r-- mm/bounce.c | 4
-rw-r--r-- mm/filemap.c | 2
-rw-r--r-- mm/highmem.c | 6
-rw-r--r-- mm/ksm.c | 2
-rw-r--r-- mm/memcontrol.c | 4
-rw-r--r-- mm/memory-failure.c | 2
-rw-r--r-- mm/memory.c | 58
-rw-r--r-- mm/mmu_context.c | 2
-rw-r--r-- mm/page_alloc.c | 149
-rw-r--r-- mm/page_cgroup.c | 11
-rw-r--r-- mm/slab.h | 4
-rw-r--r-- mm/slub.c | 126
-rw-r--r-- mm/swap.c | 38
-rw-r--r-- mm/vmalloc.c | 13
-rw-r--r-- mm/vmstat.c | 6
-rw-r--r-- net/8021q/vlan.c | 4
-rw-r--r-- net/8021q/vlan_dev.c | 6
-rw-r--r-- net/bluetooth/hci_event.c | 8
-rw-r--r-- net/bridge/br_multicast.c | 33
-rw-r--r-- net/ceph/messenger.c | 7
-rw-r--r-- net/ceph/mon_client.c | 8
-rw-r--r-- net/ceph/osd_client.c | 66
-rw-r--r-- net/core/dev.c | 112
-rw-r--r-- net/core/netpoll.c | 2
-rw-r--r-- net/core/rtnetlink.c | 10
-rw-r--r-- net/core/skbuff.c | 6
-rw-r--r-- net/core/sock.c | 8
-rw-r--r-- net/ipv4/gre_demux.c | 8
-rw-r--r-- net/ipv4/icmp.c | 30
-rw-r--r-- net/ipv4/inet_fragment.c | 5
-rw-r--r-- net/ipv4/ip_output.c | 8
-rw-r--r-- net/ipv4/ip_tunnel.c | 3
-rw-r--r-- net/ipv4/ip_tunnel_core.c | 1
-rw-r--r-- net/ipv4/ipmr.c | 13
-rw-r--r-- net/ipv4/ping.c | 15
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 7
-rw-r--r-- net/ipv4/tcp_output.c | 11
-rw-r--r-- net/ipv6/addrconf.c | 5
-rw-r--r-- net/ipv6/exthdrs_offload.c | 4
-rw-r--r-- net/ipv6/icmp.c | 2
-rw-r--r-- net/ipv6/ip6_output.c | 18
-rw-r--r-- net/ipv6/ip6mr.c | 13
-rw-r--r-- net/ipv6/mcast.c | 11
-rw-r--r-- net/ipv6/ping.c | 4
-rw-r--r-- net/ipv6/route.c | 2
-rw-r--r-- net/mac80211/rx.c | 2
-rw-r--r-- net/netfilter/core.c | 6
-rw-r--r-- net/netfilter/ipvs/ip_vs_conn.c | 8
-rw-r--r-- net/netfilter/nf_conntrack_proto_dccp.c | 6
-rw-r--r-- net/openvswitch/dp_notify.c | 7
-rw-r--r-- net/openvswitch/vport-netdev.c | 16
-rw-r--r-- net/openvswitch/vport-netdev.h | 1
-rw-r--r-- net/packet/af_packet.c | 5
-rw-r--r-- net/rds/ib_rdma.c | 3
-rw-r--r-- net/rds/iw.c | 3
-rw-r--r-- net/sched/sch_fq.c | 28
-rw-r--r-- net/sched/sch_generic.c | 2
-rw-r--r-- net/sctp/sm_make_chunk.c | 4
-rw-r--r-- net/sctp/sm_statefuns.c | 5
-rw-r--r-- net/socket.c | 4
-rw-r--r-- net/sunrpc/auth_gss/auth_gss.c | 2
-rw-r--r-- net/sunrpc/backchannel_rqst.c | 6
-rw-r--r-- net/tipc/config.c | 9
-rw-r--r-- net/tipc/handler.c | 1
-rw-r--r-- net/tipc/name_table.c | 37
-rw-r--r-- net/tipc/server.c | 14
-rw-r--r-- net/tipc/subscr.c | 48
-rw-r--r-- net/unix/af_unix.c | 17
-rwxr-xr-x scripts/mkcompile_h | 4
-rw-r--r-- scripts/package/builddeb | 13
-rw-r--r-- security/selinux/hooks.c | 36
-rw-r--r-- sound/core/compress_offload.c | 2
-rw-r--r-- sound/pci/hda/hda_generic.c | 8
-rw-r--r-- sound/pci/hda/hda_generic.h | 1
-rw-r--r-- sound/pci/hda/hda_intel.c | 3
-rw-r--r-- sound/pci/hda/patch_conexant.c | 3
-rw-r--r-- sound/pci/hda/patch_hdmi.c | 17
-rw-r--r-- sound/pci/hda/patch_realtek.c | 9
-rw-r--r-- sound/pci/hda/patch_sigmatel.c | 27
-rw-r--r-- sound/soc/codecs/max98090.c | 1
-rw-r--r-- virt/kvm/ioapic.c | 2
583 files changed, 4056 insertions(+), 11992 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
index 11ace3c..4fc3927 100644
--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
@@ -7,3 +7,4 @@ Required properties:
Optional properties:
- local-mac-address : Ethernet mac address to use
+- vdd-supply: supply for Ethernet mac
diff --git a/Documentation/hwlat_detector.txt b/Documentation/hwlat_detector.txt
deleted file mode 100644
index cb61516..0000000
--- a/Documentation/hwlat_detector.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-Introduction:
--------------
-
-The module hwlat_detector is a special purpose kernel module that is used to
-detect large system latencies induced by the behavior of certain underlying
-hardware or firmware, independent of Linux itself. The code was developed
-originally to detect SMIs (System Management Interrupts) on x86 systems,
-however there is nothing x86 specific about this patchset. It was
-originally written for use by the "RT" patch since the Real Time
-kernel is highly latency sensitive.
-
-SMIs are usually not serviced by the Linux kernel, which typically does not
-even know that they are occuring. SMIs are instead are set up by BIOS code
-and are serviced by BIOS code, usually for "critical" events such as
-management of thermal sensors and fans. Sometimes though, SMIs are used for
-other tasks and those tasks can spend an inordinate amount of time in the
-handler (sometimes measured in milliseconds). Obviously this is a problem if
-you are trying to keep event service latencies down in the microsecond range.
-
-The hardware latency detector works by hogging all of the cpus for configurable
-amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
-for some period, then looking for gaps in the TSC data. Any gap indicates a
-time when the polling was interrupted and since the machine is stopped and
-interrupts turned off the only thing that could do that would be an SMI.
-
-Note that the SMI detector should *NEVER* be used in a production environment.
-It is intended to be run manually to determine if the hardware platform has a
-problem with long system firmware service routines.
-
-Usage:
-------
-
-Loading the module hwlat_detector passing the parameter "enabled=1" (or by
-setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only
-step required to start the hwlat_detector. It is possible to redefine the
-threshold in microseconds (us) above which latency spikes will be taken
-into account (parameter "threshold=").
-
-Example:
-
- # modprobe hwlat_detector enabled=1 threshold=100
-
-After the module is loaded, it creates a directory named "hwlat_detector" under
-the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary
-to have debugfs mounted, which might be on /sys/debug on your system.
-
-The /debug/hwlat_detector interface contains the following files:
-
-count - number of latency spikes observed since last reset
-enable - a global enable/disable toggle (0/1), resets count
-max - maximum hardware latency actually observed (usecs)
-sample - a pipe from which to read current raw sample data
- in the format <timestamp> <latency observed usecs>
- (can be opened O_NONBLOCK for a single sample)
-threshold - minimum latency value to be considered (usecs)
-width - time period to sample with CPUs held (usecs)
- must be less than the total window size (enforced)
-window - total period of sampling, width being inside (usecs)
-
-By default we will set width to 500,000 and window to 1,000,000, meaning that
-we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
-observe any latencies that exceed the threshold (initially 100 usecs),
-then we write to a global sample ring buffer of 8K samples, which is
-consumed by reading from the "sample" (pipe) debugfs file interface.
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index d29dea0..babe2ef 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -25,6 +25,8 @@ Supported adapters:
* Intel Avoton (SOC)
* Intel Wellsburg (PCH)
* Intel Coleto Creek (PCH)
+ * Intel Wildcat Point-LP (PCH)
+ * Intel BayTrail (SOC)
Datasheets: Publicly available at the Intel website
On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index d458683..8cb4d78 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -57,16 +57,9 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
On other - If you know of the key combos for other architectures, please
let me know so I can add them to this section.
-On all - write a character to /proc/sysrq-trigger, e.g.:
- echo t > /proc/sysrq-trigger
-
-On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
- echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
- Send an ICMP echo request with this pattern plus the particular
- SysRq command key. Example:
- # ping -c1 -s57 -p0102030468
- will trigger the SysRq-H (help) command.
+On all - write a character to /proc/sysrq-trigger. e.g.:
+ echo t > /proc/sysrq-trigger
* What are the 'command' keys?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt
deleted file mode 100644
index 6f2aeab..0000000
--- a/Documentation/trace/histograms.txt
+++ /dev/null
@@ -1,186 +0,0 @@
- Using the Linux Kernel Latency Histograms
-
-
-This document gives a short explanation how to enable, configure and use
-latency histograms. Latency histograms are primarily relevant in the
-context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
-and are used in the quality management of the Linux real-time
-capabilities.
-
-
-* Purpose of latency histograms
-
-A latency histogram continuously accumulates the frequencies of latency
-data. There are two types of histograms
-- potential sources of latencies
-- effective latencies
-
-
-* Potential sources of latencies
-
-Potential sources of latencies are code segments where interrupts,
-preemption or both are disabled (aka critical sections). To create
-histograms of potential sources of latency, the kernel stores the time
-stamp at the start of a critical section, determines the time elapsed
-when the end of the section is reached, and increments the frequency
-counter of that latency value - irrespective of whether any concurrently
-running process is affected by latency or not.
-- Configuration items (in the Kernel hacking/Tracers submenu)
- CONFIG_INTERRUPT_OFF_LATENCY
- CONFIG_PREEMPT_OFF_LATENCY
-
-
-* Effective latencies
-
-Effective latencies are actually occuring during wakeup of a process. To
-determine effective latencies, the kernel stores the time stamp when a
-process is scheduled to be woken up, and determines the duration of the
-wakeup time shortly before control is passed over to this process. Note
-that the apparent latency in user space may be somewhat longer, since the
-process may be interrupted after control is passed over to it but before
-the execution in user space takes place. Simply measuring the interval
-between enqueuing and wakeup may also not appropriate in cases when a
-process is scheduled as a result of a timer expiration. The timer may have
-missed its deadline, e.g. due to disabled interrupts, but this latency
-would not be registered. Therefore, the offsets of missed timers are
-recorded in a separate histogram. If both wakeup latency and missed timer
-offsets are configured and enabled, a third histogram may be enabled that
-records the overall latency as a sum of the timer latency, if any, and the
-wakeup latency. This histogram is called "timerandwakeup".
-- Configuration items (in the Kernel hacking/Tracers submenu)
- CONFIG_WAKEUP_LATENCY
- CONFIG_MISSED_TIMER_OFSETS
-
-
-* Usage
-
-The interface to the administration of the latency histograms is located
-in the debugfs file system. To mount it, either enter
-
-mount -t sysfs nodev /sys
-mount -t debugfs nodev /sys/kernel/debug
-
-from shell command line level, or add
-
-nodev /sys sysfs defaults 0 0
-nodev /sys/kernel/debug debugfs defaults 0 0
-
-to the file /etc/fstab. All latency histogram related files are then
-available in the directory /sys/kernel/debug/tracing/latency_hist. A
-particular histogram type is enabled by writing non-zero to the related
-variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
-Select "preemptirqsoff" for the histograms of potential sources of
-latencies and "wakeup" for histograms of effective latencies etc. The
-histogram data - one per CPU - are available in the files
-
-/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
-/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
-/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
-/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
-/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
-/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
-/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
-
-The histograms are reset by writing non-zero to the file "reset" in a
-particular latency directory. To reset all latency data, use
-
-#!/bin/sh
-
-TRACINGDIR=/sys/kernel/debug/tracing
-HISTDIR=$TRACINGDIR/latency_hist
-
-if test -d $HISTDIR
-then
- cd $HISTDIR
- for i in `find . | grep /reset$`
- do
- echo 1 >$i
- done
-fi
-
-
-* Data format
-
-Latency data are stored with a resolution of one microsecond. The
-maximum latency is 10,240 microseconds. The data are only valid, if the
-overflow register is empty. Every output line contains the latency in
-microseconds in the first row and the number of samples in the second
-row. To display only lines with a positive latency count, use, for
-example,
-
-grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
-
-#Minimum latency: 0 microseconds.
-#Average latency: 0 microseconds.
-#Maximum latency: 25 microseconds.
-#Total samples: 3104770694
-#There are 0 samples greater or equal than 10240 microseconds
-#usecs samples
- 0 2984486876
- 1 49843506
- 2 58219047
- 3 5348126
- 4 2187960
- 5 3388262
- 6 959289
- 7 208294
- 8 40420
- 9 4485
- 10 14918
- 11 18340
- 12 25052
- 13 19455
- 14 5602
- 15 969
- 16 47
- 17 18
- 18 14
- 19 1
- 20 3
- 21 2
- 22 5
- 23 2
- 25 1
-
-
-* Wakeup latency of a selected process
-
-To only collect wakeup latency data of a particular process, write the
-PID of the requested process to
-
-/sys/kernel/debug/tracing/latency_hist/wakeup/pid
-
-PIDs are not considered, if this variable is set to 0.
-
-
-* Details of the process with the highest wakeup latency so far
-
-Selected data of the process that suffered from the highest wakeup
-latency that occurred in a particular CPU are available in the file
-
-/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
-
-In addition, other relevant system data at the time when the
-latency occurred are given.
-
-The format of the data is (all in one line):
-<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
-<- <PID> <Priority> <Command> <Timestamp>
-
-The value of <Timeroffset> is only relevant in the combined timer
-and wakeup latency recording. In the wakeup recording, it is
-always 0, in the missed_timer_offsets recording, it is the same
-as <Latency>.
-
-When retrospectively searching for the origin of a latency and
-tracing was not enabled, it may be helpful to know the name and
-some basic data of the task that (finally) was switching to the
-late real-tlme task. In addition to the victim's data, also the
-data of the possible culprit are therefore displayed after the
-"<-" symbol.
-
-Finally, the timestamp of the time when the latency occurred
-in <seconds>.<microseconds> after the most recent system boot
-is provided.
-
-These data are also reset when the wakeup histogram is reset.
diff --git a/Makefile b/Makefile
index 517391a..cf5d97e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 12
-SUBLEVEL = 15
+SUBLEVEL = 19
EXTRAVERSION =
NAME = One Giant Leap for Frogkind
diff --git a/arch/Kconfig b/arch/Kconfig
index 77e7e80..af2cc6e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -6,7 +6,6 @@ config OPROFILE
tristate "OProfile system profiling"
depends on PROFILING
depends on HAVE_OPROFILE
- depends on !PREEMPT_RT_FULL
select RING_BUFFER
select RING_BUFFER_ALLOW_SWAP
help
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index ee01270..98838a0 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* If we're in an interrupt context, or have no user context,
we must not take the fault. */
- if (!mm || pagefault_disabled())
+ if (!mm || in_atomic())
goto no_context;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index ea16d78..4f31b2e 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -11,13 +11,16 @@
/ {
compatible = "snps,nsimosci";
- clock-frequency = <80000000>; /* 80 MHZ */
+ clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;
chosen {
- bootargs = "console=tty0 consoleblank=0";
+ /* this is for console on PGU */
+ /* bootargs = "console=tty0 consoleblank=0"; */
+ /* this is for console on serial */
+ bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
};
aliases {
@@ -44,15 +47,14 @@
};
uart0: serial@c0000000 {
- compatible = "snps,dw-apb-uart";
+ compatible = "ns8250";
reg = <0xc0000000 0x2000>;
interrupts = <11>;
- #clock-frequency = <80000000>;
clock-frequency = <3686400>;
baud = <115200>;
reg-shift = <2>;
reg-io-width = <4>;
- status = "okay";
+ no-loopback-test = <1>;
};
pgu0: pgu@c9000000 {
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 451af30..c01ba35 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -54,6 +54,7 @@ CONFIG_SERIO_ARC_PS2=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_ARC=y
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2ec9220..1ad6fb6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -51,7 +51,6 @@ config ARM
select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_PERF_EVENTS
- select HAVE_PREEMPT_LAZY
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5689c18..ceb4807 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -120,11 +120,14 @@
/*
* 2nd stage PTE definitions for LPAE.
*/
-#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
+#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
+#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
+#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
+#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
+
+#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
/*
* Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index f3e3d80..c99e259 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -3,13 +3,6 @@
#include <linux/thread_info.h>
-#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
-void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
-#else
-static inline void
-switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
-#endif
-
/*
* For v7 SMP cores running a preemptible kernel we may be pre-empted
* during a TLB maintenance operation, so execute an inner-shareable dsb
@@ -29,7 +22,6 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
- switch_kmaps(prev, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 33cb511..df5e13d 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -60,7 +60,6 @@ struct arm_restart_block {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
- int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
@@ -160,7 +159,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
-#define TIF_NEED_RESCHED_LAZY 3
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
#define TIF_SYSCALL_TRACEPOINT 10
@@ -173,7 +171,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 12e46dd..ded0417 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -54,7 +54,6 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 8c5e809..ec3e5cf 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -205,18 +205,11 @@ __irq_svc:
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
- teq r8, #0 @ if preempt count != 0
- bne 1f @ return from exeption
ldr r0, [tsk, #TI_FLAGS] @ get flags
- tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
- blne svc_preempt @ preempt!
-
- ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
- teq r8, #0 @ if preempt lazy count != 0
+ teq r8, #0 @ if preempt count != 0
movne r0, #0 @ force flags to 0
- tst r0, #_TIF_NEED_RESCHED_LAZY
+ tst r0, #_TIF_NEED_RESCHED
blne svc_preempt
-1:
#endif
svc_exit r5, irq = 1 @ return from exception
@@ -231,8 +224,6 @@ svc_preempt:
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
- bne 1b
- tst r0, #_TIF_NEED_RESCHED_LAZY
moveq pc, r8 @ go again
b 1b
#endif
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 83af229..92f7b15 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -432,30 +432,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
}
#ifdef CONFIG_MMU
-/*
- * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
- * initialized by pgtable_page_ctor() then a coredump of the vector page will
- * fail.
- */
-static int __init vectors_user_mapping_init_page(void)
-{
- struct page *page;
- unsigned long addr = 0xffff0000;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-
- pgd = pgd_offset_k(addr);
- pud = pud_offset(pgd, addr);
- pmd = pmd_offset(pud, addr);
- page = pmd_page(*(pmd));
-
- pgtable_page_ctor(page);
-
- return 0;
-}
-late_initcall(vectors_user_mapping_init_page);
-
#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 0e1e2b3..2a767d2 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -622,6 +622,7 @@ void __init dump_machine_table(void)
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+ u64 aligned_start;
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
@@ -634,10 +635,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
* Size is appropriately rounded down, start is rounded up.
*/
size -= start & ~PAGE_MASK;
- bank->start = PAGE_ALIGN(start);
+ aligned_start = PAGE_ALIGN(start);
-#ifndef CONFIG_ARM_LPAE
- if (bank->start + size < bank->start) {
+#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
+ if (aligned_start > ULONG_MAX) {
+ printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
+ "32-bit physical address space\n", (long long)start);
+ return -EINVAL;
+ }
+
+ if (aligned_start + size > ULONG_MAX) {
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
"32-bit physical address space\n", (long long)start);
/*
@@ -645,10 +652,25 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
* 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
* This means we lose a page after masking.
*/
- size = ULONG_MAX - bank->start;
+ size = ULONG_MAX - aligned_start;
}
#endif
+ if (aligned_start < PHYS_OFFSET) {
+ if (aligned_start + size <= PHYS_OFFSET) {
+ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+ aligned_start, aligned_start + size);
+ return -EINVAL;
+ }
+
+ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+ aligned_start, (u64)PHYS_OFFSET);
+
+ size -= PHYS_OFFSET - aligned_start;
+ aligned_start = PHYS_OFFSET;
+ }
+
+ bank->start = aligned_start;
bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
/*
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index d1b0bfd..ab33042 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -589,8 +589,7 @@ asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
do {
- if (likely(thread_flags & (_TIF_NEED_RESCHED |
- _TIF_NEED_RESCHED_LAZY))) {
+ if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule();
} else {
if (unlikely(!user_mode(regs)))
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index bbafc67..00df012 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -87,7 +87,7 @@ extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
-static DEFINE_RAW_SPINLOCK(unwind_lock);
+static DEFINE_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
/* Convert a prel31 symbol to an absolute address */
@@ -195,7 +195,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
/* module unwind tables */
struct unwind_table *table;
- raw_spin_lock_irqsave(&unwind_lock, flags);
+ spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->begin_addr &&
addr < table->end_addr) {
@@ -207,7 +207,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
break;
}
}
- raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ spin_unlock_irqrestore(&unwind_lock, flags);
}
pr_debug("%s: idx = %p\n", __func__, idx);
@@ -469,9 +469,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
- raw_spin_lock_irqsave(&unwind_lock, flags);
+ spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
- raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ spin_unlock_irqrestore(&unwind_lock, flags);
return tab;
}
@@ -483,9 +483,9 @@ void unwind_table_del(struct unwind_table *tab)
if (!tab)
return;
- raw_spin_lock_irqsave(&unwind_lock, flags);
+ spin_lock_irqsave(&unwind_lock, flags);
list_del(&tab->list);
- raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ spin_unlock_irqrestore(&unwind_lock, flags);
kfree(tab);
}
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 35f7b26..f607deb 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -134,7 +134,6 @@ clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev)
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
- remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
case CLOCK_EVT_MODE_RESUME:
irqmask = 0;
break;
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index 1c4c487..bb39232 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -77,7 +77,7 @@ static struct clocksource pit_clk = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static struct irqaction at91sam926x_pit_irq;
+
/*
* Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
*/
@@ -86,8 +86,6 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
{
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- /* Set up irq handler */
- setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
/* update clocksource counter */
pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR));
pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
@@ -100,7 +98,6 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
case CLOCK_EVT_MODE_UNUSED:
/* disable irq, leaving the clocksource active */
pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
- remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
break;
case CLOCK_EVT_MODE_RESUME:
break;
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index f56f767..58b43e6 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -73,7 +73,7 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)(S5P_VA_SCU);
}
-static DEFINE_RAW_SPINLOCK(boot_lock);
+static DEFINE_SPINLOCK(boot_lock);
static void exynos_secondary_init(unsigned int cpu)
{
@@ -86,8 +86,8 @@ static void exynos_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
}
static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -99,7 +99,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Set synchronisation state between this boot processor
* and the secondary one
*/
- raw_spin_lock(&boot_lock);
+ spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -128,7 +128,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
if (timeout == 0) {
printk(KERN_ERR "cpu1 power enable failed");
- raw_spin_unlock(&boot_lock);
+ spin_unlock(&boot_lock);
return -ETIMEDOUT;
}
}
@@ -167,7 +167,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- raw_spin_unlock(&boot_lock);
+ spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index fc09a04..3f06edc 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -30,7 +30,7 @@
extern void msm_secondary_startup(void);
-static DEFINE_RAW_SPINLOCK(boot_lock);
+static DEFINE_SPINLOCK(boot_lock);
static inline int get_core_count(void)
{
@@ -50,8 +50,8 @@ static void msm_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
}
static void prepare_cold_cpu(unsigned int cpu)
@@ -88,7 +88,7 @@ static int msm_boot_secondary(unsigned int cpu, struct task_struct *idle)
* set synchronisation state between this boot processor
* and the secondary one
*/
- raw_spin_lock(&boot_lock);
+ spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -122,7 +122,7 @@ static int msm_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- raw_spin_unlock(&boot_lock);
+ spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 5969da3..8912110 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -44,7 +44,7 @@ u16 pm44xx_errata;
/* SCU base address */
static void __iomem *scu_base;
-static DEFINE_RAW_SPINLOCK(boot_lock);
+static DEFINE_SPINLOCK(boot_lock);
void __iomem *omap4_get_scu_base(void)
{
@@ -68,8 +68,8 @@ static void omap4_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
}
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -83,7 +83,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Set synchronisation state between this boot processor
* and the secondary one
*/
- raw_spin_lock(&boot_lock);
+ spin_lock(&boot_lock);
/*
* Update the AuxCoreBoot0 with boot state for secondary core.
@@ -160,7 +160,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- raw_spin_unlock(&boot_lock);
+ spin_unlock(&boot_lock);
return 0;
}
diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
index 42837dc4..3dbcb1a 100644
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -23,7 +23,7 @@
static void __iomem *scu_base;
static void __iomem *rsc_base;
-static DEFINE_RAW_SPINLOCK(boot_lock);
+static DEFINE_SPINLOCK(boot_lock);
static struct map_desc scu_io_desc __initdata = {
.length = SZ_4K,
@@ -56,8 +56,8 @@ static void sirfsoc_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
}
static struct of_device_id rsc_ids[] = {
@@ -95,7 +95,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
/* make sure write buffer is drained */
mb();
- raw_spin_lock(&boot_lock);
+ spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -128,7 +128,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- raw_spin_unlock(&boot_lock);
+ spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
index 33dc270..5c4a198 100644
--- a/arch/arm/mach-spear/platsmp.c
+++ b/arch/arm/mach-spear/platsmp.c
@@ -20,7 +20,7 @@
#include <mach/spear.h>
#include "generic.h"
-static DEFINE_RAW_SPINLOCK(boot_lock);
+static DEFINE_SPINLOCK(boot_lock);
static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
@@ -36,8 +36,8 @@ static void spear13xx_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
}
static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -48,7 +48,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
* set synchronisation state between this boot processor
* and the secondary one
*/
- raw_spin_lock(&boot_lock);
+ spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -75,7 +75,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- raw_spin_unlock(&boot_lock);
+ spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
index c05b764..dce50d9 100644
--- a/arch/arm/mach-sti/platsmp.c
+++ b/arch/arm/mach-sti/platsmp.c
@@ -35,7 +35,7 @@ static void write_pen_release(int val)
outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}
-static DEFINE_RAW_SPINLOCK(boot_lock);
+static DEFINE_SPINLOCK(boot_lock);
void sti_secondary_init(unsigned int cpu)
{
@@ -50,8 +50,8 @@ void sti_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
}
int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -62,7 +62,7 @@ int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
* set synchronisation state between this boot processor
* and the secondary one
*/
- raw_spin_lock(&boot_lock);
+ spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -93,7 +93,7 @@ int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- raw_spin_unlock(&boot_lock);
+ spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 94a119a..3c405f4 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/irqchip.h>
#include <linux/clk-provider.h>
@@ -82,10 +83,20 @@ void tegra_assert_system_reset(enum reboot_mode mode, const char *cmd)
static void __init tegra_init_cache(void)
{
#ifdef CONFIG_CACHE_L2X0
+ static const struct of_device_id pl310_ids[] __initconst = {
+ { .compatible = "arm,pl310-cache", },
+ {}
+ };
+
+ struct device_node *np;
int ret;
void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
u32 aux_ctrl, cache_type;
+ np = of_find_matching_node(NULL, pl310_ids);
+ if (!np)
+ return;
+
cache_type = readl(p + L2X0_CACHE_TYPE);
aux_ctrl = (cache_type & 0x700) << (17-8);
aux_ctrl |= 0x7C400001;
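
The lookup added above makes L2 cache setup conditional on the device tree actually describing a PL310; of_find_matching_node() returns NULL when nothing matches. A hedged sketch of that guard pattern (the of_node_put() is added here for reference-count balance and is not part of the hunk):

    #include <linux/of.h>

    static const struct of_device_id l2_ids_sketch[] __initconst = {
            { .compatible = "arm,pl310-cache", },
            { /* sentinel */ }
    };

    static void __init init_l2_sketch(void)
    {
            struct device_node *np;

            np = of_find_matching_node(NULL, l2_ids_sketch);
            if (!np)
                    return;         /* no PL310 in the DT: skip setup */
            of_node_put(np);        /* release the node reference */
            /* ... program the cache controller ... */
    }
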
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index eeb5916..1f296e7 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -52,7 +52,7 @@ static void __iomem *scu_base_addr(void)
return NULL;
}
-static DEFINE_RAW_SPINLOCK(boot_lock);
+static DEFINE_SPINLOCK(boot_lock);
static void ux500_secondary_init(unsigned int cpu)
{
@@ -65,8 +65,8 @@ static void ux500_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
}
static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -77,7 +77,7 @@ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
* set synchronisation state between this boot processor
* and the secondary one
*/
- raw_spin_lock(&boot_lock);
+ spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -98,7 +98,7 @@ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- raw_spin_unlock(&boot_lock);
+ spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index b40d4ba..eb8830a 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -277,7 +277,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto no_context;
if (user_mode(regs))
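
This hunk, and the matching ones in the other architectures' fault handlers below, restores the mainline in_atomic() test in place of RT's pagefault_disabled(): a fault taken in atomic context, or with no mm, must bail to the kernel no_context path rather than risk sleeping on mmap_sem. A kernel-style sketch of the common early-out, with the surrounding handler elided:

    #include <linux/errno.h>
    #include <linux/hardirq.h>
    #include <linux/sched.h>

    static int fault_guard_sketch(struct mm_struct *mm)
    {
            /*
             * In an interrupt, or without user context, servicing the
             * fault could sleep on mmap_sem -- refuse and let the
             * caller take the no_context / fixup path instead.
             */
            if (in_atomic() || !mm)
                    return -EFAULT;
            return 0;
    }
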
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index bd41dd8..21b9e1b 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -38,7 +38,6 @@ EXPORT_SYMBOL(kunmap);
void *kmap_atomic(struct page *page)
{
- pte_t pte = mk_pte(page, kmap_prot);
unsigned int idx;
unsigned long vaddr;
void *kmap;
@@ -77,10 +76,7 @@ void *kmap_atomic(struct page *page)
* in place, so the contained TLB flush ensures the TLB is updated
* with the new mapping.
*/
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = pte;
-#endif
- set_top_pte(vaddr, pte);
+ set_top_pte(vaddr, mk_pte(page, kmap_prot));
return (void *)vaddr;
}
@@ -97,15 +93,12 @@ void __kunmap_atomic(void *kvaddr)
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = __pte(0);
-#endif
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ set_top_pte(vaddr, __pte(0));
#else
(void) idx; /* to kill a warning */
#endif
- set_top_pte(vaddr, __pte(0));
kmap_atomic_idx_pop();
} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
/* this address was obtained through kmap_high_get() */
@@ -117,7 +110,6 @@ EXPORT_SYMBOL(__kunmap_atomic);
void *kmap_atomic_pfn(unsigned long pfn)
{
- pte_t pte = pfn_pte(pfn, kmap_prot);
unsigned long vaddr;
int idx, type;
@@ -129,10 +121,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = pte;
-#endif
- set_top_pte(vaddr, pte);
+ set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
return (void *)vaddr;
}
@@ -146,29 +135,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
return pte_page(get_top_pte(vaddr));
}
-
-#if defined CONFIG_PREEMPT_RT_FULL
-void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-{
- int i;
-
- /*
- * Clear @prev's kmap_atomic mappings
- */
- for (i = 0; i < prev_p->kmap_idx; i++) {
- int idx = i + KM_TYPE_NR * smp_processor_id();
-
- set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0));
- }
- /*
- * Restore @next_p's kmap_atomic mappings
- */
- for (i = 0; i < next_p->kmap_idx; i++) {
- int idx = i + KM_TYPE_NR * smp_processor_id();
-
- if (!pte_none(next_p->kmap_pte[i]))
- set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx),
- next_p->kmap_pte[i]);
- }
-}
-#endif
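
The deleted switch_kmaps() block was PREEMPT_RT's bookkeeping for saving and restoring a task's atomic kmaps across context switches; with it gone, kmap_atomic() slots are strictly per-CPU again and a mapping must not survive a preemption point. A rough sketch of the restored usage contract (illustrative helper, not taken from the hunk):

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void copy_from_page_sketch(struct page *page, void *dst,
                                      size_t len)
    {
            void *src = kmap_atomic(page);  /* disables preemption */
            memcpy(dst, src, len);          /* must not sleep here */
            kunmap_atomic(src);             /* re-enables preemption */
    }
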
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9a..33eab61 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
struct mem_type {
pteval_t prot_pte;
+ pteval_t prot_pte_s2;
pmdval_t prot_l1;
pmdval_t prot_sect;
unsigned int domain;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 304661d..5e85ed3 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
}
/*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
*/
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
- return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+ return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
#ifdef CONFIG_STRICT_DEVMEM
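
The new bound swaps the hard-coded 4 GiB limit (0x00100000 PFNs) for one derived from PHYS_MASK, so LPAE kernels can mmap /dev/mem above 4 GiB. A standalone sketch of the arithmetic, with PAGE_SHIFT and PHYS_MASK stubbed for an assumed 40-bit LPAE configuration:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PHYS_MASK  ((1ULL << 40) - 1)   /* assumed 40-bit phys */

    static bool valid_mmap_range_sketch(unsigned long long pfn,
                                        unsigned long long size)
    {
            /* highest mappable PFN is PHYS_MASK >> PAGE_SHIFT */
            return (pfn + (size >> PAGE_SHIFT)) <=
                   (1 + (PHYS_MASK >> PAGE_SHIFT));
    }

    int main(void)
    {
            printf("%d\n", valid_mmap_range_sketch(0x1000, 1 << 20));     /* 1 */
            printf("%d\n", valid_mmap_range_sketch(1ULL << 28, 1 << 20)); /* 0 */
            return 0;
    }
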
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b1d17ee..0222ba7 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -229,12 +229,16 @@ __setup("noalign", noalign_setup);
#endif /* ifdef CONFIG_CPU_CP15 / else */
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
+ .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
+ s2_policy(L_PTE_S2_MT_DEV_SHARED) |
+ L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
@@ -456,7 +460,8 @@ static void __init build_mem_type_table(void)
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
s2_pgprot = cp->pte_s2;
- hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+ hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+ s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
/*
* ARMv6 and above have extended page tables.
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index b2e0858..39895d8 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -31,7 +31,7 @@ static void write_pen_release(int val)
outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}
-static DEFINE_RAW_SPINLOCK(boot_lock);
+static DEFINE_SPINLOCK(boot_lock);
void versatile_secondary_init(unsigned int cpu)
{
@@ -44,8 +44,8 @@ void versatile_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
}
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -56,7 +56,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Set synchronisation state between this boot processor
* and the secondary one
*/
- raw_spin_lock(&boot_lock);
+ spin_lock(&boot_lock);
/*
* This is really belt and braces; we hold unintended secondary
@@ -86,7 +86,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- raw_spin_unlock(&boot_lock);
+ spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 84fcc50..519c4b2 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -6,6 +6,8 @@
/dts-v1/;
+/memreserve/ 0x80000000 0x00010000;
+
/ {
model = "Foundation-v8A";
compatible = "arm,foundation-aarch64", "arm,vexpress";
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 965c28f..82d95a7 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -255,7 +255,7 @@ static inline int has_transparent_hugepage(void)
#define pgprot_noncached(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define pgprot_writecombine(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define __HAVE_PHYS_MEM_ACCESS_PROT
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 89c047f..70ba9d4 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -59,6 +59,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -82,6 +85,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
const unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
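
The early return makes the zero-length case explicit: with n == 0 and i == SYSCALL_MAX_ARGS, the old code would fall through to pointer arithmetic and a zero-byte copy that is at best pointless and at worst undefined. A standalone sketch of the guarded copy, using a plain array as a stand-in for the real pt_regs access:

    #include <string.h>

    #define SYSCALL_MAX_ARGS 6

    static void get_args_sketch(const unsigned long *regs, unsigned int i,
                                unsigned int n, unsigned long *args)
    {
            if (n == 0)
                    return;         /* nothing to copy, avoid OOB math */

            if (i + n > SYSCALL_MAX_ARGS) {
                    unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;

                    memset(args + SYSCALL_MAX_ARGS - i, 0,
                           n_bad * sizeof(*args));
                    n = SYSCALL_MAX_ARGS - i;
            }
            memcpy(args, regs + i, n * sizeof(*args));
    }
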
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 25920d2..0eca933 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
* If we're in an interrupt or have no user context, we must
* not take the fault...
*/
- if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled())
+ if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
goto no_context;
local_irq_enable();
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 281e859..1790f22 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -113,7 +113,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
* user context, we must not take the fault.
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto no_context;
if (user_mode(regs))
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 8d9fc16..9a66372 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto no_context;
if (user_mode(__frame))
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 164db10..7225dad 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
/*
* If we're in an interrupt or have no user context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto no_context;
#ifdef CONFIG_VIRTUAL_MEM_MAP
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index ccb6797..e9c6a80 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
* If we're in an interrupt or have no user context or are running in an
* atomic region then we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto bad_area_nosemaphore;
if (error_code & ACE_USERMODE)
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 311a300..ee121a0 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -16,6 +16,7 @@ config M68K
select FPU if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
+ select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 76bee37..eb1d61f 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto no_context;
if (user_mode(regs))
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 13d6b07..fa4cf52 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
is_write = 0;
- if (unlikely(!mm || pagefault_disabled())) {
+ if (unlikely(in_atomic() || !mm)) {
if (kernel_mode(regs))
goto bad_area_nosemaphore;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f3981c2..f75ab4a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2078,7 +2078,7 @@ config CPU_R4400_WORKAROUNDS
#
config HIGHMEM
bool "High Memory Support"
- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL
+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM
config CPU_SUPPORTS_HIGHMEM
bool
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index a4ae7ad..2f285ab 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -573,7 +573,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
__u32 thread_info_flags)
{
local_irq_enable();
- preempt_check_resched();
user_exit();
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 6a492c0..becc42b 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -89,7 +89,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto bad_area_nosemaphore;
if (user_mode(regs))
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 8bd4425..3516cbd 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto no_context;
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 0c91072..10a0c2a 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -392,7 +392,7 @@
ENTRY_COMP(vmsplice)
ENTRY_COMP(move_pages) /* 295 */
ENTRY_SAME(getcpu)
- ENTRY_SAME(epoll_pwait)
+ ENTRY_COMP(epoll_pwait)
ENTRY_COMP(statfs64)
ENTRY_COMP(fstatfs64)
ENTRY_COMP(kexec_load) /* 300 */
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index f16f57b..0293588 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -177,7 +177,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
int fault;
unsigned int flags;
- if (pagefault_disabled())
+ if (in_atomic())
goto no_context;
tsk = current;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9ca41f7..38f3b7e 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -60,11 +60,10 @@ config LOCKDEP_SUPPORT
config RWSEM_GENERIC_SPINLOCK
bool
- default y if PREEMPT_RT_FULL
config RWSEM_XCHGADD_ALGORITHM
bool
- default y if !PREEMPT_RT_FULL
+ default y
config GENERIC_LOCKBREAK
bool
@@ -132,7 +131,6 @@ config PPC
select GENERIC_CLOCKEVENTS
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
- select HAVE_PREEMPT_LAZY
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
@@ -287,7 +285,7 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
- depends on PPC32 && !PREEMPT_RT_FULL
+ depends on PPC32
source kernel/Kconfig.hz
source kernel/Kconfig.preempt
diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
index 5143228..6636b1d 100644
--- a/arch/powerpc/boot/util.S
+++ b/arch/powerpc/boot/util.S
@@ -71,18 +71,32 @@ udelay:
add r4,r4,r5
addi r4,r4,-1
divw r4,r4,r5 /* BUS ticks */
+#ifdef CONFIG_8xx
+1: mftbu r5
+ mftb r6
+ mftbu r7
+#else
1: mfspr r5, SPRN_TBRU
mfspr r6, SPRN_TBRL
mfspr r7, SPRN_TBRU
+#endif
cmpw 0,r5,r7
bne 1b /* Get [synced] base time */
addc r9,r6,r4 /* Compute end time */
addze r8,r5
+#ifdef CONFIG_8xx
+2: mftbu r5
+#else
2: mfspr r5, SPRN_TBRU
+#endif
cmpw 0,r5,r8
blt 2b
bgt 3f
+#ifdef CONFIG_8xx
+ mftb r6
+#else
mfspr r6, SPRN_TBRL
+#endif
cmpw 0,r6,r9
blt 2b
3: blr
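
The MPC8xx has no TBRU/TBRL SPR aliases, so its time base must be read with the dedicated mftbu/mftb instructions rather than mfspr; the hunks below repeat the same substitution in ppc_asm.h, reg.h, timex.h, and the 32-bit VDSO. A hedged C rendering of the read-until-stable loop the assembly implements (compiles only for 32-bit PowerPC targets):

    /* Race-free 64-bit timebase read on 32-bit PowerPC: retry if the
     * high word ticks over between the two reads.  mftbu/mftb is the
     * 8xx form; other cores use mfspr with SPRN_TBRU/SPRN_TBRL. */
    static unsigned long long read_tb_sketch(void)
    {
            unsigned long hi, lo, hi2;

            do {
                    asm volatile("mftbu %0" : "=r" (hi));
                    asm volatile("mftb  %0" : "=r" (lo));
                    asm volatile("mftbu %0" : "=r" (hi2));
            } while (hi != hi2);

            return ((unsigned long long)hi << 32) | lo;
    }
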
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index d3e5e9b..e37db7f 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -117,6 +117,16 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
return edev ? edev->pdev : NULL;
}
+/* Return values from eeh_ops::next_error */
+enum {
+ EEH_NEXT_ERR_NONE = 0,
+ EEH_NEXT_ERR_INF,
+ EEH_NEXT_ERR_FROZEN_PE,
+ EEH_NEXT_ERR_FENCED_PHB,
+ EEH_NEXT_ERR_DEAD_PHB,
+ EEH_NEXT_ERR_DEAD_IOC
+};
+
/*
* The struct is used to trace the registered EEH operation
* callback functions. Actually, those operation callback
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index c2dcfaa..0d2d0f0 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -438,6 +438,8 @@ BEGIN_FTR_SECTION_NESTED(96); \
cmpwi dest,0; \
beq- 90b; \
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
+#elif defined(CONFIG_8xx)
+#define MFTB(dest) mftb dest
#else
#define MFTB(dest) mfspr dest, SPRN_TBRL
#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 10d1ef0..7ca729c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1154,12 +1154,19 @@
#else /* __powerpc64__ */
+#if defined(CONFIG_8xx)
+#define mftbl() ({unsigned long rval; \
+ asm volatile("mftbl %0" : "=r" (rval)); rval;})
+#define mftbu() ({unsigned long rval; \
+ asm volatile("mftbu %0" : "=r" (rval)); rval;})
+#else
#define mftbl() ({unsigned long rval; \
asm volatile("mfspr %0, %1" : "=r" (rval) : \
"i" (SPRN_TBRL)); rval;})
#define mftbu() ({unsigned long rval; \
asm volatile("mfspr %0, %1" : "=r" (rval) : \
"i" (SPRN_TBRU)); rval;})
+#endif
#endif /* !__powerpc64__ */
#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index f50711f..ba7b197 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -43,8 +43,6 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
- int preempt_lazy_count; /* 0 => preemptable,
- <0 => BUG */
struct restart_block restart_block;
unsigned long local_flags; /* private flags for thread */
@@ -92,7 +90,8 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */
+#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
+ TIF_NEED_RESCHED */
#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
@@ -108,8 +107,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
for stack store? */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_POLLING_NRFLAG 18 /* true if poll_idle() is polling
- TIF_NEED_RESCHED */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -129,16 +126,13 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
-#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
_TIF_NOHZ)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_NEED_RESCHED_LAZY)
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
-#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
index 18908ca..2cf846e 100644
--- a/arch/powerpc/include/asm/timex.h
+++ b/arch/powerpc/include/asm/timex.h
@@ -29,7 +29,11 @@ static inline cycles_t get_cycles(void)
ret = 0;
__asm__ __volatile__(
+#ifdef CONFIG_8xx
+ "97: mftb %0\n"
+#else
"97: mfspr %0, %2\n"
+#endif
"99:\n"
".section __ftr_fixup,\"a\"\n"
".align 2\n"
@@ -41,7 +45,11 @@ static inline cycles_t get_cycles(void)
" .long 0\n"
" .long 0\n"
".previous"
+#ifdef CONFIG_8xx
+ : "=r" (ret) : "i" (CPU_FTR_601));
+#else
: "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL));
+#endif
return ret;
#endif
}
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 55210c0..502c7a4 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -165,7 +165,6 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 36bed5a..d3a132c 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -626,84 +626,90 @@ static void eeh_handle_special_event(void)
{
struct eeh_pe *pe, *phb_pe;
struct pci_bus *bus;
- struct pci_controller *hose, *tmp;
+ struct pci_controller *hose;
unsigned long flags;
- int rc = 0;
+ int rc;
- /*
- * The return value from next_error() has been classified as follows.
- * It might be good to enumerate them. However, next_error() is only
- * supported by PowerNV platform for now. So it would be fine to use
- * integer directly:
- *
- * 4 - Dead IOC 3 - Dead PHB
- * 2 - Fenced PHB 1 - Frozen PE
- * 0 - No error found
- *
- */
- rc = eeh_ops->next_error(&pe);
- if (rc <= 0)
- return;
- switch (rc) {
- case 4:
- /* Mark all PHBs in dead state */
- eeh_serialize_lock(&flags);
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
- phb_pe = eeh_phb_pe_get(hose);
- if (!phb_pe) continue;
-
- eeh_pe_state_mark(phb_pe,
- EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
+ do {
+ rc = eeh_ops->next_error(&pe);
+
+ switch (rc) {
+ case EEH_NEXT_ERR_DEAD_IOC:
+ /* Mark all PHBs in dead state */
+ eeh_serialize_lock(&flags);
+
+ /* Purge all events */
+ eeh_remove_event(NULL);
+
+ list_for_each_entry(hose, &hose_list, list_node) {
+ phb_pe = eeh_phb_pe_get(hose);
+ if (!phb_pe) continue;
+
+ eeh_pe_state_mark(phb_pe,
+ EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
+ }
+
+ eeh_serialize_unlock(flags);
+
+ break;
+ case EEH_NEXT_ERR_FROZEN_PE:
+ case EEH_NEXT_ERR_FENCED_PHB:
+ case EEH_NEXT_ERR_DEAD_PHB:
+ /* Mark the PE in fenced state */
+ eeh_serialize_lock(&flags);
+
+ /* Purge all events of the PHB */
+ eeh_remove_event(pe);
+
+ if (rc == EEH_NEXT_ERR_DEAD_PHB)
+ eeh_pe_state_mark(pe,
+ EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
+ else
+ eeh_pe_state_mark(pe,
+ EEH_PE_ISOLATED | EEH_PE_RECOVERING);
+
+ eeh_serialize_unlock(flags);
+
+ break;
+ case EEH_NEXT_ERR_NONE:
+ return;
+ default:
+ pr_warn("%s: Invalid value %d from next_error()\n",
+ __func__, rc);
+ return;
}
- eeh_serialize_unlock(flags);
-
- /* Purge all events */
- eeh_remove_event(NULL);
- break;
- case 3:
- case 2:
- case 1:
- /* Mark the PE in fenced state */
- eeh_serialize_lock(&flags);
- if (rc == 3)
- eeh_pe_state_mark(pe,
- EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
- else
- eeh_pe_state_mark(pe,
- EEH_PE_ISOLATED | EEH_PE_RECOVERING);
- eeh_serialize_unlock(flags);
-
- /* Purge all events of the PHB */
- eeh_remove_event(pe);
- break;
- default:
- pr_err("%s: Invalid value %d from next_error()\n",
- __func__, rc);
- return;
- }
- /*
- * For fenced PHB and frozen PE, it's handled as normal
- * event. We have to remove the affected PHBs for dead
- * PHB and IOC
- */
- if (rc == 2 || rc == 1)
- eeh_handle_normal_event(pe);
- else {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
- phb_pe = eeh_phb_pe_get(hose);
- if (!phb_pe || !(phb_pe->state & EEH_PE_PHB_DEAD))
- continue;
-
- bus = eeh_pe_bus_get(phb_pe);
- /* Notify all devices that they're about to go down. */
- eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
- pcibios_remove_pci_devices(bus);
+ /*
+ * A fenced PHB or frozen PE is handled as a normal
+ * event. We have to remove the affected PHBs for a
+ * dead PHB or IOC.
+ */
+ if (rc == EEH_NEXT_ERR_FROZEN_PE ||
+ rc == EEH_NEXT_ERR_FENCED_PHB) {
+ eeh_handle_normal_event(pe);
+ } else {
+ list_for_each_entry(hose, &hose_list, list_node) {
+ phb_pe = eeh_phb_pe_get(hose);
+ if (!phb_pe ||
+ !(phb_pe->state & EEH_PE_PHB_DEAD))
+ continue;
+
+ /* Notify all devices that they're about to go down */
+ bus = eeh_pe_bus_get(phb_pe);
+ eeh_pe_dev_traverse(pe,
+ eeh_report_failure, NULL);
+ pcibios_remove_pci_devices(bus);
+ }
}
- }
+
+ /*
+ * If we have detected a dead IOC, we needn't proceed
+ * any further since all PHBs would have been removed
+ */
+ if (rc == EEH_NEXT_ERR_DEAD_IOC)
+ break;
+ } while (rc != EEH_NEXT_ERR_NONE);
}
/**
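
Restructured this way, the handler drains every pending error in one pass, looping on next_error() until EEH_NEXT_ERR_NONE instead of classifying a single magic integer and returning. A reduced sketch of the control flow using the enum added in eeh.h above (remove_dead_phbs_sketch() is a hypothetical stand-in for the PHB teardown loop, and locking/event purging are elided):

    static void handle_special_event_sketch(void)
    {
            struct eeh_pe *pe;
            int rc;

            do {
                    rc = eeh_ops->next_error(&pe);
                    if (rc <= EEH_NEXT_ERR_INF)
                            return;         /* nothing actionable left */

                    if (rc == EEH_NEXT_ERR_FROZEN_PE ||
                        rc == EEH_NEXT_ERR_FENCED_PHB)
                            eeh_handle_normal_event(pe);
                    else
                            remove_dead_phbs_sketch();  /* hypothetical */

            /* a dead IOC takes every PHB with it: stop polling */
            } while (rc != EEH_NEXT_ERR_DEAD_IOC);
    }
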
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 081f926..22b45a4 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -890,14 +890,7 @@ resume_kernel:
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
andi. r8,r8,_TIF_NEED_RESCHED
- bne+ 1f
- lwz r0,TI_PREEMPT_LAZY(r9)
- cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore
- lwz r0,TI_FLAGS(r9)
- andi. r0,r0,_TIF_NEED_RESCHED_LAZY
beq+ restore
-1:
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
@@ -908,11 +901,11 @@ resume_kernel:
*/
bl trace_hardirqs_off
#endif
-2: bl preempt_schedule_irq
+1: bl preempt_schedule_irq
CURRENT_THREAD_INFO(r9, r1)
lwz r3,TI_FLAGS(r9)
- andi. r0,r3,_TIF_NEED_RESCHED_MASK
- bne- 2b
+ andi. r0,r3,_TIF_NEED_RESCHED
+ bne- 1b
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
@@ -1233,7 +1226,7 @@ global_dbcr0:
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
- andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ andi. r0,r9,_TIF_NEED_RESCHED
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
@@ -1254,7 +1247,7 @@ recheck:
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
- andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ andi. r0,r9,_TIF_NEED_RESCHED
bne- do_resched
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index e6bfe8e..7be3717 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -659,7 +659,7 @@ _GLOBAL(ret_from_except_lite)
#else
beq restore
#endif
-1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
+1: andi. r0,r4,_TIF_NEED_RESCHED
beq 2f
bl .restore_interrupts
SCHEDULE_USER
@@ -709,18 +709,10 @@ resume_kernel:
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
- lwz r8,TI_PREEMPT(r9)
- cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
- bne restore
andi. r0,r4,_TIF_NEED_RESCHED
- bne+ check_count
-
- andi. r0,r4,_TIF_NEED_RESCHED_LAZY
beq+ restore
- lwz r8,TI_PREEMPT_LAZY(r9)
-
/* Check that preempt_count() == 0 and interrupts are enabled */
-check_count:
+ lwz r8,TI_PREEMPT(r9)
cmpwi cr1,r8,0
ld r0,SOFTE(r1)
cmpdi r0,0
@@ -737,7 +729,7 @@ check_count:
/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
- andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ andi. r0,r4,_TIF_NEED_RESCHED
bne 1b
/*
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f3ed55a..c7cb8c2 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -594,7 +594,6 @@ void irq_ctx_init(void)
}
}
-#ifndef CONFIG_PREEMPT_RT_FULL
static inline void do_softirq_onstack(void)
{
struct thread_info *curtp, *irqtp;
@@ -627,7 +626,6 @@ void do_softirq(void)
local_irq_restore(flags);
}
-#endif
irq_hw_number_t virq_to_hw(unsigned int virq)
{
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 84cbc94..ace3413 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -40,7 +40,6 @@
* We store the saved ksp_limit in the unused part
* of the STACK_FRAME_OVERHEAD
*/
-#ifndef CONFIG_PREEMPT_RT_FULL
_GLOBAL(call_do_softirq)
mflr r0
stw r0,4(r1)
@@ -57,7 +56,6 @@ _GLOBAL(call_do_softirq)
stw r10,THREAD+KSP_LIMIT(r2)
mtlr r0
blr
-#endif
/*
* void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index f93987e..e59caf8 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -29,7 +29,6 @@
.text
-#ifndef CONFIG_PREEMPT_RT_FULL
_GLOBAL(call_do_softirq)
mflr r0
std r0,16(r1)
@@ -40,7 +39,6 @@ _GLOBAL(call_do_softirq)
ld r0,16(r1)
mtlr r0
blr
-#endif
_GLOBAL(call_do_irq)
mflr r0
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 5ac241b..b3b1441 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -423,7 +423,7 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_IRQ_WORK
/*
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 6b1f2a6..6b2b696 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -232,9 +232,15 @@ __do_get_tspec:
lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
/* Get a stable TB value */
+#ifdef CONFIG_8xx
+2: mftbu r3
+ mftbl r4
+ mftbu r0
+#else
2: mfspr r3, SPRN_TBRU
mfspr r4, SPRN_TBRL
mfspr r0, SPRN_TBRU
+#endif
cmplw cr0,r3,r0
bne- 2b
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 45aa26e..51ab9e7 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
- if (in_atomic() || mm == NULL || pagefault_disabled()) {
+ if (in_atomic() || mm == NULL) {
if (!user_mode(regs)) {
rc = SIGSEGV;
goto bail;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 2898b73..b69221b 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -340,7 +340,7 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
{
int l1irq;
int l2irq;
- struct irq_chip *uninitialized_var(irqchip);
+ struct irq_chip *irqchip;
void *hndlr;
int type;
u32 reg;
@@ -373,8 +373,9 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
case MPC52xx_IRQ_L1_CRIT:
+ default:
pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
- __func__, l2irq);
+ __func__, l1irq);
irq_set_chip(virq, &no_irq_chip);
return 0;
}
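
Dropping uninitialized_var() is safe only because the switch gains a default arm, so irqchip is provably assigned on every path; the warning message is also fixed to print the level-1 IRQ it actually switched on. The general shape, as a small standalone sketch:

    #include <stdio.h>

    /* Give the switch a default arm instead of annotating the variable
     * with uninitialized_var(): every path now assigns or returns. */
    static const char *pick_chip_sketch(int l1irq)
    {
            const char *chip;       /* no dummy initializer needed */

            switch (l1irq) {
            case 0: chip = "main"; break;
            case 1: chip = "periph"; break;
            default:
                    fprintf(stderr, "unsupported l1irq %d\n", l1irq);
                    return NULL;
            }
            return chip;
    }

    int main(void)
    {
            puts(pick_chip_sketch(1));      /* periph */
            return 0;
    }
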
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index b7eb5d4..227c7fe 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -766,12 +766,12 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
*/
static int ioda_eeh_next_error(struct eeh_pe **pe)
{
- struct pci_controller *hose, *tmp;
+ struct pci_controller *hose;
struct pnv_phb *phb;
u64 frozen_pe_no;
u16 err_type, severity;
long rc;
- int ret = 1;
+ int ret = EEH_NEXT_ERR_NONE;
/*
* While running here, it's safe to purge the event queue.
@@ -781,7 +781,7 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
eeh_remove_event(NULL);
opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ list_for_each_entry(hose, &hose_list, list_node) {
/*
* If the subordinate PCI buses of the PHB have been
* removed, we needn't take care of it any more.
@@ -820,19 +820,19 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
switch (err_type) {
case OPAL_EEH_IOC_ERROR:
if (severity == OPAL_EEH_SEV_IOC_DEAD) {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
+ list_for_each_entry(hose, &hose_list,
+ list_node) {
phb = hose->private_data;
phb->eeh_state |= PNV_EEH_STATE_REMOVED;
}
pr_err("EEH: dead IOC detected\n");
- ret = 4;
- goto out;
+ ret = EEH_NEXT_ERR_DEAD_IOC;
} else if (severity == OPAL_EEH_SEV_INF) {
pr_info("EEH: IOC informative error "
"detected\n");
ioda_eeh_hub_diag(hose);
+ ret = EEH_NEXT_ERR_NONE;
}
break;
@@ -844,21 +844,20 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
pr_err("EEH: dead PHB#%x detected\n",
hose->global_number);
phb->eeh_state |= PNV_EEH_STATE_REMOVED;
- ret = 3;
- goto out;
+ ret = EEH_NEXT_ERR_DEAD_PHB;
} else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
if (ioda_eeh_get_phb_pe(hose, pe))
break;
pr_err("EEH: fenced PHB#%x detected\n",
hose->global_number);
- ret = 2;
- goto out;
+ ret = EEH_NEXT_ERR_FENCED_PHB;
} else if (severity == OPAL_EEH_SEV_INF) {
pr_info("EEH: PHB#%x informative error "
"detected\n",
hose->global_number);
ioda_eeh_phb_diag(hose);
+ ret = EEH_NEXT_ERR_NONE;
}
break;
@@ -868,13 +867,23 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
(*pe)->addr, (*pe)->phb->global_number);
- ret = 1;
- goto out;
+ ret = EEH_NEXT_ERR_FROZEN_PE;
+ break;
+ default:
+ pr_warn("%s: Unexpected error type %d\n",
+ __func__, err_type);
}
+
+ /*
+ * If we have no errors on the specific PHB, or only an
+ * informative error there, we continue poking it.
+ * Otherwise, actions need to be taken by the upper
+ * layer.
+ */
+ if (ret > EEH_NEXT_ERR_INF)
+ break;
}
- ret = 0;
-out:
return ret;
}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 3e01afa..6671e8d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -116,6 +116,7 @@ config S390
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 5be8e47..65fc397 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -46,18 +46,13 @@ __kernel_clock_gettime:
jnm 3f
a %r0,__VDSO_TK_MULT(%r5)
3: alr %r0,%r2
- al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
- al %r1,__VDSO_XTIME_NSEC+4(%r5)
- brc 12,4f
- ahi %r0,1
-4: al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
+ al %r0,__VDSO_WTOM_NSEC(%r5)
al %r1,__VDSO_WTOM_NSEC+4(%r5)
brc 12,5f
ahi %r0,1
5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
srdl %r0,0(%r2) /* >> tk->shift */
- l %r2,__VDSO_XTIME_SEC+4(%r5)
- al %r2,__VDSO_WTOM_SEC+4(%r5)
+ l %r2,__VDSO_WTOM_SEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 1b
basr %r5,0
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 0add107..c09cda3 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -35,13 +35,11 @@ __kernel_clock_gettime:
jnz 0b
stck 48(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
- lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
- alg %r0,__VDSO_WTOM_SEC(%r5) /* + wall_to_monotonic.sec */
+ lg %r0,__VDSO_WTOM_SEC(%r5)
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
- alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
+ alg %r1,__VDSO_WTOM_NSEC(%r5)
srlg %r1,%r1,0(%r2) /* >> tk->shift */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6b0efce..fc66792 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -296,8 +296,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
- if (unlikely(!user_space_fault(trans_exc_code) ||
- !mm || pagefault_disabled()))
+ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
@@ -443,8 +442,7 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
clear_tsk_thread_flag(current, TIF_PER_TRAP);
trans_exc_code = regs->int_parm_long;
- if (unlikely(!user_space_fault(trans_exc_code) || !mm ||
- pagefault_disabled()))
+ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 35d339b..52238983 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -73,7 +73,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto bad_area_nosemaphore;
if (user_mode(regs))
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index ae4b141..063af10 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -149,7 +149,6 @@ void irq_ctx_exit(int cpu)
hardirq_ctx[cpu] = NULL;
}
-#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void)
{
unsigned long flags;
@@ -192,7 +191,6 @@ asmlinkage void do_softirq(void)
local_irq_restore(flags);
}
-#endif
#else
static inline void handle_one_irq(unsigned int irq)
{
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 6589138..541dc61 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -438,7 +438,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
- if (unlikely(!mm || pagefault_disabled())) {
+ if (unlikely(in_atomic() || !mm)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index da51da9..4e56838 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,8 +25,7 @@ config SPARC
select RTC_DRV_M48T59
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
- select HAVE_ARCH_JUMP_LABEL
- select IRQ_FORCED_THREADING
+ select HAVE_ARCH_JUMP_LABEL if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select USE_GENERIC_SMP_HELPERS if SMP
@@ -178,10 +177,12 @@ config NR_CPUS
source kernel/Kconfig.hz
config RWSEM_GENERIC_SPINLOCK
- def_bool PREEMPT_RT_FULL
+ bool
+ default y if SPARC32
config RWSEM_XCHGADD_ALGORITHM
- def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+ bool
+ default y if SPARC64
config GENERIC_HWEIGHT
bool
@@ -522,10 +523,6 @@ menu "Executable file formats"
source "fs/Kconfig.binfmt"
-config EARLY_PRINTK
- bool
- default y
-
config COMPAT
bool
depends on SPARC64
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index e945ddb..76092c4 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -90,7 +90,7 @@ struct tsb_config {
#endif
typedef struct {
- raw_spinlock_t lock;
+ spinlock_t lock;
unsigned long sparc64_ctx_val;
unsigned long huge_pte_count;
struct page *pgtable_page;
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 44e393b..3d528f0 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -13,7 +13,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-extern raw_spinlock_t ctx_alloc_lock;
+extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
@@ -77,7 +77,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
if (unlikely(mm == &init_mm))
return;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
ctx_valid = CTX_VALID(mm->context);
if (!ctx_valid)
get_new_mmu_context(mm);
@@ -125,7 +125,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
__flush_tlb_mm(CTX_HWBITS(mm->context),
SECONDARY_CONTEXT);
}
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
#define deactivate_mm(tsk,mm) do { } while (0)
@@ -136,7 +136,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
unsigned long flags;
int cpu;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
if (!CTX_VALID(mm->context))
get_new_mmu_context(mm);
cpu = smp_processor_id();
@@ -146,7 +146,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
load_secondary_context(mm);
__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
tsb_context_switch(mm);
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index d74fa7f..d4840ce 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -698,7 +698,6 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
set_irq_regs(old_regs);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq(void)
{
unsigned long flags;
@@ -724,7 +723,6 @@ void do_softirq(void)
local_irq_restore(flags);
}
-#endif
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index dbb51a6..269af58 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -43,12 +43,10 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
set_irq_regs(old_regs);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
void arch_irq_work_raise(void)
{
set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
-#endif
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index baebab2..b9cc976 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -57,9 +57,12 @@ void arch_cpu_idle(void)
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
+ local_irq_enable();
} else {
unsigned long pstate;
+ local_irq_enable();
+
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
* the cpu sleep hypervisor call.
*/
@@ -81,7 +84,6 @@ void arch_cpu_idle(void)
: "=&r" (pstate)
: "i" (PSTATE_IE));
}
- local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 0884ccd..1434526 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -309,7 +309,6 @@ void __init setup_arch(char **cmdline_p)
boot_flags_init(*cmdline_p);
- early_console = &prom_early_console;
register_console(&prom_early_console);
printk("ARCH: ");
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 4306d44..3fdb455 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -555,12 +555,6 @@ static void __init init_sparc64_elf_hwcap(void)
pause_patch();
}
-static inline void register_prom_console(void)
-{
- early_console = &prom_early_console;
- register_console(&prom_early_console);
-}
-
void __init setup_arch(char **cmdline_p)
{
/* Initialize PROM console and command line. */
@@ -572,7 +566,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_EARLYFB
if (btext_find_display())
#endif
- register_prom_console();
+ register_console(&prom_early_console);
if (tlb_type == hypervisor)
printk("ARCH: SUN4V\n");
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 8c68424..e142545 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -976,12 +976,12 @@ void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *reg
if (unlikely(!mm || (mm == &init_mm)))
return;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
if (unlikely(!CTX_VALID(mm->context)))
get_new_mmu_context(mm);
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
load_secondary_context(mm);
__flush_tlb_mm(CTX_HWBITS(mm->context),
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index d950197..6dee795 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -189,7 +189,8 @@ linux_sparc_syscall32:
mov %i0, %l5 ! IEU1
5: call %l7 ! CTI Group brk forced
srl %i5, 0, %o5 ! IEU1
- ba,a,pt %xcc, 3f
+ ba,pt %xcc, 3f
+ sra %o0, 0, %o0
/* Linux native system calls enter here... */
.align 32
@@ -217,7 +218,6 @@ linux_sparc_syscall:
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
- sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2
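
Moving the sra out of the shared ret_sys_call path into the compat branch means only 32-bit tasks get their syscall return value sign-extended; 64-bit tasks now keep the full register. In C terms, 'sra %o0, 0, %o0' performs the equivalent of this standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend the low 32 bits of a register into all 64. */
    static int64_t compat_ret_sketch(uint64_t o0)
    {
            return (int64_t)(int32_t)o0;
    }

    int main(void)
    {
            /* 32-bit -2 (0xfffffffe) must stay -2, not 4294967294 */
            printf("%lld\n", (long long)compat_ret_sketch(0xfffffffeULL));
            return 0;
    }
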
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 2eaca28..59dbd46 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -199,7 +199,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index a1d35e2..2ebec26 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -322,7 +322,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled())
+ if (in_atomic() || !mm)
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ec995b0..ed82eda 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -350,7 +350,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
mm = vma->vm_mm;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
@@ -361,7 +361,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
address, pte_val(pte));
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
void flush_dcache_page(struct page *page)
@@ -661,7 +661,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
EXPORT_SYMBOL(__flush_dcache_range);
/* get_new_mmu_context() uses "cache + 1". */
-DEFINE_RAW_SPINLOCK(ctx_alloc_lock);
+DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR (1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
@@ -683,7 +683,7 @@ void get_new_mmu_context(struct mm_struct *mm)
unsigned long orig_pgsz_bits;
int new_version;
- raw_spin_lock(&ctx_alloc_lock);
+ spin_lock(&ctx_alloc_lock);
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
@@ -719,7 +719,7 @@ void get_new_mmu_context(struct mm_struct *mm)
out:
tlb_context_cache = new_ctx;
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
- raw_spin_unlock(&ctx_alloc_lock);
+ spin_unlock(&ctx_alloc_lock);
if (unlikely(new_version))
smp_new_mmu_context_version();
@@ -2721,7 +2721,7 @@ void hugetlb_setup(struct pt_regs *regs)
if (tlb_type == cheetah_plus) {
unsigned long ctx;
- raw_spin_lock(&ctx_alloc_lock);
+ spin_lock(&ctx_alloc_lock);
ctx = mm->context.sparc64_ctx_val;
ctx &= ~CTX_PGSZ_MASK;
ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
@@ -2742,7 +2742,7 @@ void hugetlb_setup(struct pt_regs *regs)
mm->context.sparc64_ctx_val = ctx;
on_each_cpu(context_reload, mm, 0);
}
- raw_spin_unlock(&ctx_alloc_lock);
+ spin_unlock(&ctx_alloc_lock);
}
}
#endif
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 9eb10b4..2cc3bce 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -73,7 +73,7 @@ void flush_tsb_user(struct tlb_batch *tb)
struct mm_struct *mm = tb->mm;
unsigned long nentries, base, flags;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
@@ -90,14 +90,14 @@ void flush_tsb_user(struct tlb_batch *tb)
__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
}
#endif
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
{
unsigned long nentries, base, flags;
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
@@ -114,7 +114,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
}
#endif
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
@@ -392,7 +392,7 @@ retry_tsb_alloc:
* the lock and ask all other cpus running this address space
* to run tsb_context_switch() to see the new TSB table.
*/
- raw_spin_lock_irqsave(&mm->context.lock, flags);
+ spin_lock_irqsave(&mm->context.lock, flags);
old_tsb = mm->context.tsb_block[tsb_index].tsb;
old_cache_index =
@@ -407,7 +407,7 @@ retry_tsb_alloc:
*/
if (unlikely(old_tsb &&
(rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
return;
@@ -433,7 +433,7 @@ retry_tsb_alloc:
mm->context.tsb_block[tsb_index].tsb = new_tsb;
setup_tsb_params(mm, tsb_index, new_size);
- raw_spin_unlock_irqrestore(&mm->context.lock, flags);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
/* If old_tsb is NULL, we're being invoked for the first time
* from init_new_context().
@@ -459,7 +459,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
#endif
unsigned int i;
- raw_spin_lock_init(&mm->context.lock);
+ spin_lock_init(&mm->context.lock);
mm->context.sparc64_ctx_val = 0UL;
@@ -523,12 +523,12 @@ void destroy_context(struct mm_struct *mm)
free_hot_cold_page(page, 0);
}
- raw_spin_lock_irqsave(&ctx_alloc_lock, flags);
+ spin_lock_irqsave(&ctx_alloc_lock, flags);
if (CTX_VALID(mm->context)) {
unsigned long nr = CTX_NRBITS(mm->context);
mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
}
- raw_spin_unlock_irqrestore(&ctx_alloc_lock, flags);
+ spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 40f30ac..6c05712 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -357,7 +357,7 @@ static int handle_page_fault(struct pt_regs *regs,
* If we're in an interrupt, have no user context or are running in an
* atomic region then we must not take the fault.
*/
- if (!mm || pagefault_disabled()) {
+ if (in_atomic() || !mm) {
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 100a278..5c3aef7 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -38,7 +38,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
* If the fault was during atomic operation, don't take the fault, just
* fail.
*/
- if (pagefault_disabled())
+ if (in_atomic())
goto out_nosemaphore;
if (is_user)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index edbb857..f67e839 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,7 +21,6 @@ config X86_64
### Arch settings
config X86
def_bool y
- select HAVE_PREEMPT_LAZY
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select HAVE_AOUT if X86_32
select HAVE_UNSTABLE_SCHED_CLOCK
@@ -180,11 +179,8 @@ config ARCH_MAY_HAVE_PC_FDC
def_bool y
depends on ISA_DMA_API
-config RWSEM_GENERIC_SPINLOCK
- def_bool PREEMPT_RT_FULL
-
config RWSEM_XCHGADD_ALGORITHM
- def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+ def_bool y
config GENERIC_CALIBRATE_DELAY
def_bool y
@@ -474,7 +470,7 @@ config X86_MDFLD
select MFD_INTEL_MSIC
---help---
Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin
- Internet Device(MID) platform.
+ Internet Device(MID) platform.
Unlike standard x86 PCs, Medfield does not have many legacy devices
nor standard legacy replacement devices/features. e.g. Medfield does
not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
@@ -821,7 +817,7 @@ config IOMMU_HELPER
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
- select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
+ select CPUMASK_OFFSTACK
---help---
Enable maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 3fbe870..f80e668 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -252,14 +252,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
- kernel_fpu_begin();
aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK);
- kernel_fpu_end();
+ nbytes & AES_BLOCK_MASK);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ kernel_fpu_end();
return err;
}
@@ -276,14 +276,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
- kernel_fpu_begin();
aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK);
- kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ kernel_fpu_end();
return err;
}
@@ -300,14 +300,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
- kernel_fpu_begin();
aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
- kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ kernel_fpu_end();
return err;
}
@@ -324,14 +324,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
- kernel_fpu_begin();
aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
- kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ kernel_fpu_end();
return err;
}
@@ -364,20 +364,18 @@ static int ctr_crypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ kernel_fpu_begin();
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
- kernel_fpu_begin();
aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
- kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
if (walk.nbytes) {
- kernel_fpu_begin();
ctr_crypt_final(ctx, &walk);
- kernel_fpu_end();
err = blkcipher_walk_done(desc, &walk, 0);
}
+ kernel_fpu_end();
return err;
}
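
Every aesni hunk above makes the same transformation: kernel_fpu_begin()/kernel_fpu_end() move from inside the blkcipher walk loop to around it, so the SIMD unit is claimed once per request rather than once per chunk. A minimal sketch of the restored shape (illustrative, not the exact kernel code):

	kernel_fpu_begin();                     /* claim FPU/SIMD once */
	while ((nbytes = walk.nbytes)) {
		/* SIMD cipher work on this chunk */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();                       /* release it once */

kernel_fpu_begin() disables preemption, so holding it across the whole walk is faster but lengthens the non-preemptible region; the RT tree had narrowed it to per-chunk, and this revert restores the mainline trade-off. The cast5 and glue_helper hunks below apply the same idea via the fpu_enabled accumulator.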
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 2d48e83..c663181 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -60,7 +60,7 @@ static inline void cast5_fpu_end(bool fpu_enabled)
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
bool enc)
{
- bool fpu_enabled;
+ bool fpu_enabled = false;
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes;
@@ -76,7 +76,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
- fpu_enabled = cast5_fpu_begin(false, nbytes);
+ fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
@@ -104,9 +104,10 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
} while (nbytes >= bsize);
done:
- cast5_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, walk, nbytes);
}
+
+ cast5_fpu_end(fpu_enabled);
return err;
}
@@ -230,7 +231,7 @@ done:
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
- bool fpu_enabled;
+ bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;
@@ -239,11 +240,12 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes)) {
- fpu_enabled = cast5_fpu_begin(false, nbytes);
+ fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
nbytes = __cbc_decrypt(desc, &walk);
- cast5_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+
+ cast5_fpu_end(fpu_enabled);
return err;
}
@@ -313,7 +315,7 @@ done:
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
- bool fpu_enabled;
+ bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;
@@ -322,12 +324,13 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
- fpu_enabled = cast5_fpu_begin(false, nbytes);
+ fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
nbytes = __ctr_crypt(desc, &walk);
- cast5_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ cast5_fpu_end(fpu_enabled);
+
if (walk.nbytes) {
ctr_crypt_final(desc, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index 586f41a..185fad4 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -24,10 +24,6 @@
.align 16
.Lbswap_mask:
.octa 0x000102030405060708090a0b0c0d0e0f
-.Lpoly:
- .octa 0xc2000000000000000000000000000001
-.Ltwo_one:
- .octa 0x00000001000000000000000000000001
#define DATA %xmm0
#define SHASH %xmm1
@@ -134,28 +130,3 @@ ENTRY(clmul_ghash_update)
.Lupdate_just_ret:
ret
ENDPROC(clmul_ghash_update)
-
-/*
- * void clmul_ghash_setkey(be128 *shash, const u8 *key);
- *
- * Calculate hash_key << 1 mod poly
- */
-ENTRY(clmul_ghash_setkey)
- movaps .Lbswap_mask, BSWAP
- movups (%rsi), %xmm0
- PSHUFB_XMM BSWAP %xmm0
- movaps %xmm0, %xmm1
- psllq $1, %xmm0
- psrlq $63, %xmm1
- movaps %xmm1, %xmm2
- pslldq $8, %xmm1
- psrldq $8, %xmm2
- por %xmm1, %xmm0
- # reduction
- pshufd $0b00100100, %xmm2, %xmm1
- pcmpeqd .Ltwo_one, %xmm1
- pand .Lpoly, %xmm1
- pxor %xmm1, %xmm0
- movups %xmm0, (%rdi)
- ret
-ENDPROC(clmul_ghash_setkey)
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 6759dd1..d785cf2 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -30,8 +30,6 @@ void clmul_ghash_mul(char *dst, const be128 *shash);
void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
const be128 *shash);
-void clmul_ghash_setkey(be128 *shash, const u8 *key);
-
struct ghash_async_ctx {
struct cryptd_ahash *cryptd_tfm;
};
@@ -58,13 +56,23 @@ static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+ be128 *x = (be128 *)key;
+ u64 a, b;
if (keylen != GHASH_BLOCK_SIZE) {
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- clmul_ghash_setkey(&ctx->shash, key);
+ /* perform multiplication by 'x' in GF(2^128) */
+ a = be64_to_cpu(x->a);
+ b = be64_to_cpu(x->b);
+
+ ctx->shash.a = (__be64)((b << 1) | (a >> 63));
+ ctx->shash.b = (__be64)((a << 1) | (b >> 63));
+
+ if (a >> 63)
+ ctx->shash.b ^= cpu_to_be64(0xc2);
return 0;
}
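
The new C in ghash_setkey() computes H * x in GF(2^128) directly, which is what the deleted clmul_ghash_setkey() assembly did: shift the 128-bit hash key left one bit and, when a bit falls off the top, reduce by the field polynomial (the old .Lpoly constant, 0xc2000...0001 in the reflected representation). A hedged standalone sketch with illustrative names:

	/* Double a 128-bit GF(2^128) element held as hi:lo halves,
	 * reducing by the reflected GHASH polynomial when the
	 * shifted-out bit is set. */
	static void gf128_mul_x(u64 *hi, u64 *lo)
	{
		u64 carry = *hi >> 63;          /* bit shifted out */

		*hi = (*hi << 1) | (*lo >> 63);
		*lo <<= 1;
		if (carry) {
			*hi ^= 0xc200000000000000ULL;
			*lo ^= 1;
		}
	}

The in-tree version above additionally swaps the two halves when storing into ctx->shash, which appears to match the operand layout the PCLMULQDQ routines expect.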
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index 4a2bd21..432f1d76 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
void *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = 128 / 8;
unsigned int nbytes, i, func_bytes;
- bool fpu_enabled;
+ bool fpu_enabled = false;
int err;
err = blkcipher_walk_virt(desc, walk);
@@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
u8 *wdst = walk->dst.virt.addr;
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, false, nbytes);
+ desc, fpu_enabled, nbytes);
for (i = 0; i < gctx->num_funcs; i++) {
func_bytes = bsize * gctx->funcs[i].num_blocks;
@@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
}
done:
- glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, walk, nbytes);
}
+ glue_fpu_end(fpu_enabled);
return err;
}
@@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
struct scatterlist *src, unsigned int nbytes)
{
const unsigned int bsize = 128 / 8;
- bool fpu_enabled;
+ bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;
@@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
while ((nbytes = walk.nbytes)) {
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, false, nbytes);
+ desc, fpu_enabled, nbytes);
nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
- glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ glue_fpu_end(fpu_enabled);
return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
@@ -278,7 +278,7 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
struct scatterlist *src, unsigned int nbytes)
{
const unsigned int bsize = 128 / 8;
- bool fpu_enabled;
+ bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;
@@ -287,12 +287,13 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
while ((nbytes = walk.nbytes) >= bsize) {
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, false, nbytes);
+ desc, fpu_enabled, nbytes);
nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
- glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ glue_fpu_end(fpu_enabled);
+
if (walk.nbytes) {
glue_ctr_crypt_final_128bit(
gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
@@ -347,7 +348,7 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
void *tweak_ctx, void *crypt_ctx)
{
const unsigned int bsize = 128 / 8;
- bool fpu_enabled;
+ bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;
@@ -360,21 +361,21 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
/* set minimum length to bsize, for tweak_fn */
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, false,
+ desc, fpu_enabled,
nbytes < bsize ? bsize : nbytes);
+
/* calculate first value of T */
tweak_fn(tweak_ctx, walk.iv, walk.iv);
- glue_fpu_end(fpu_enabled);
while (nbytes) {
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, false, nbytes);
nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
- glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
nbytes = walk.nbytes;
}
+
+ glue_fpu_end(fpu_enabled);
+
return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 50226c4..e52947f 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void)
/* allow AVX to override SSSE3, it's a little faster */
if (avx_usable()) {
#ifdef CONFIG_AS_AVX2
- if (boot_cpu_has(X86_FEATURE_AVX2))
+ if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2))
sha256_transform_asm = sha256_transform_rorx;
else
#endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5ad38ad..bbc8b12 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -445,20 +445,10 @@ static inline int pte_same(pte_t a, pte_t b)
return a.pte == b.pte;
}
-static inline int pteval_present(pteval_t pteval)
-{
- /*
- * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
- * way clearly states that the intent is that protnone and numa
- * hinting ptes are considered present for the purposes of
- * pagetable operations like zapping, protection changes, gup etc.
- */
- return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
-}
-
static inline int pte_present(pte_t a)
{
- return pteval_present(pte_flags(a));
+ return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+ _PAGE_NUMA);
}
#define pte_accessible pte_accessible
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 6ec0792..35e67a4 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -23,19 +23,6 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-/*
- * Because some traps use the IST stack, we must keep preemption
- * disabled while calling do_trap(), but do_trap() may call
- * force_sig_info() which will grab the signal spin_locks for the
- * task, which in PREEMPT_RT_FULL are mutexes. By defining
- * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
- * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
- * trap.
- */
-#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64)
-#define ARCH_RT_DELAYS_SIGNAL_SEND
-#endif
-
#ifndef CONFIG_COMPAT
typedef sigset_t compat_sigset_t;
#endif
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 64fb5cb..6a99859 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -57,7 +57,7 @@
*/
static __always_inline void boot_init_stack_canary(void)
{
- u64 uninitialized_var(canary);
+ u64 canary;
u64 tsc;
#ifdef CONFIG_X86_64
@@ -68,16 +68,8 @@ static __always_inline void boot_init_stack_canary(void)
* of randomness. The TSC only matters for very early init,
* there it already has some randomness on most systems. Later
* on during the bootup the random pool has true entropy too.
- *
- * For preempt-rt we need to weaken the randomness a bit, as
- * we can't call into the random generator from atomic context
- * due to locking constraints. We just leave canary
- * uninitialized and use the TSC based randomness on top of
- * it.
*/
-#ifndef CONFIG_PREEMPT_RT_FULL
get_random_bytes(&canary, sizeof(canary));
-#endif
tsc = __native_read_tsc();
canary += tsc + (tsc << 32UL);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index f08e527..2781119 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -30,8 +30,6 @@ struct thread_info {
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
- int preempt_lazy_count; /* 0 => lazy preemptable,
- <0 => BUG */
mm_segment_t addr_limit;
struct restart_block restart_block;
void __user *sysenter_return;
@@ -83,7 +81,6 @@ struct thread_info {
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
-#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
@@ -108,7 +105,6 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
-#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
@@ -158,8 +154,6 @@ struct thread_info {
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
-#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-
#define PREEMPT_ACTIVE 0x10000000
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index d35f24e..1306d11 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -119,9 +119,10 @@ static inline void setup_node_to_cpumask_map(void) { }
extern const struct cpumask *cpu_coregroup_mask(int cpu);
-#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+
+#ifdef ENABLE_TOPO_DEFINES
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#endif
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 5816e6a..e63a5bd 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2396,8 +2396,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
/* If we are moving the irq we need to mask it */
- if (unlikely(irqd_is_setaffinity_pending(data) &&
- !irqd_irq_inprogress(data))) {
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
mask_ioapic(cfg);
return true;
}
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index a36d9cf..2861082 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -33,7 +33,6 @@ void common(void) {
OFFSET(TI_status, thread_info, status);
OFFSET(TI_addr_limit, thread_info, addr_limit);
OFFSET(TI_preempt_count, thread_info, preempt_count);
- OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
BLANK();
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 3a7ab0b..b3218cd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -18,7 +18,6 @@
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
-#include <linux/kthread.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
@@ -42,7 +41,6 @@
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>
-#include <linux/jiffies.h>
#include <asm/processor.h>
#include <asm/mce.h>
@@ -1270,7 +1268,7 @@ void mce_log_therm_throt_event(__u64 status)
static unsigned long check_interval = 5 * 60; /* 5 minutes */
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
-static DEFINE_PER_CPU(struct hrtimer, mce_timer);
+static DEFINE_PER_CPU(struct timer_list, mce_timer);
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
@@ -1280,10 +1278,13 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
static unsigned long (*mce_adjust_timer)(unsigned long interval) =
mce_adjust_timer_default;
-static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
+static void mce_timer_fn(unsigned long data)
{
+ struct timer_list *t = &__get_cpu_var(mce_timer);
unsigned long iv;
+ WARN_ON(smp_processor_id() != data);
+
if (mce_available(__this_cpu_ptr(&cpu_info))) {
machine_check_poll(MCP_TIMESTAMP,
&__get_cpu_var(mce_poll_banks));
@@ -1304,11 +1305,9 @@ static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
__this_cpu_write(mce_next_interval, iv);
/* Might have become 0 after CMCI storm subsided */
if (iv) {
- hrtimer_forward_now(timer, ns_to_ktime(
- jiffies_to_usecs(iv) * 1000ULL));
- return HRTIMER_RESTART;
+ t->expires = jiffies + iv;
+ add_timer_on(t, smp_processor_id());
}
- return HRTIMER_NORESTART;
}
/*
@@ -1316,37 +1315,28 @@ static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
*/
void mce_timer_kick(unsigned long interval)
{
- struct hrtimer *t = &__get_cpu_var(mce_timer);
+ struct timer_list *t = &__get_cpu_var(mce_timer);
+ unsigned long when = jiffies + interval;
unsigned long iv = __this_cpu_read(mce_next_interval);
- if (hrtimer_active(t)) {
- s64 exp;
- s64 intv_us;
-
- intv_us = jiffies_to_usecs(interval);
- exp = ktime_to_us(hrtimer_expires_remaining(t));
- if (intv_us < exp) {
- hrtimer_cancel(t);
- hrtimer_start_range_ns(t,
- ns_to_ktime(intv_us * 1000),
- 0, HRTIMER_MODE_REL_PINNED);
- }
+ if (timer_pending(t)) {
+ if (time_before(when, t->expires))
+ mod_timer_pinned(t, when);
} else {
- hrtimer_start_range_ns(t,
- ns_to_ktime(jiffies_to_usecs(interval) * 1000ULL),
- 0, HRTIMER_MODE_REL_PINNED);
+ t->expires = round_jiffies(when);
+ add_timer_on(t, smp_processor_id());
}
if (interval < iv)
__this_cpu_write(mce_next_interval, interval);
}
-/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */
+/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
int cpu;
for_each_online_cpu(cpu)
- hrtimer_cancel(&per_cpu(mce_timer, cpu));
+ del_timer_sync(&per_cpu(mce_timer, cpu));
}
static void mce_do_trigger(struct work_struct *work)
@@ -1356,63 +1346,6 @@ static void mce_do_trigger(struct work_struct *work)
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
-static void __mce_notify_work(void)
-{
- /* Not more than two messages every minute */
- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
-
- /* wake processes polling /dev/mcelog */
- wake_up_interruptible(&mce_chrdev_wait);
-
- /*
- * There is no risk of missing notifications because
- * work_pending is always cleared before the function is
- * executed.
- */
- if (mce_helper[0] && !work_pending(&mce_trigger_work))
- schedule_work(&mce_trigger_work);
-
- if (__ratelimit(&ratelimit))
- pr_info(HW_ERR "Machine check events logged\n");
-}
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-struct task_struct *mce_notify_helper;
-
-static int mce_notify_helper_thread(void *unused)
-{
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- if (kthread_should_stop())
- break;
- __mce_notify_work();
- }
- return 0;
-}
-
-static int mce_notify_work_init(void)
-{
- mce_notify_helper = kthread_run(mce_notify_helper_thread, NULL,
- "mce-notify");
- if (!mce_notify_helper)
- return -ENOMEM;
-
- return 0;
-}
-
-static void mce_notify_work(void)
-{
- wake_up_process(mce_notify_helper);
-}
-#else
-static void mce_notify_work(void)
-{
- __mce_notify_work();
-}
-static inline int mce_notify_work_init(void) { return 0; }
-#endif
-
/*
* Notify the user(s) about new machine check events.
* Can be called from interrupt context, but not from machine check/NMI
@@ -1420,8 +1353,19 @@ static inline int mce_notify_work_init(void) { return 0; }
*/
int mce_notify_irq(void)
{
+ /* Not more than two messages every minute */
+ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+
if (test_and_clear_bit(0, &mce_need_notify)) {
- mce_notify_work();
+ /* wake processes polling /dev/mcelog */
+ wake_up_interruptible(&mce_chrdev_wait);
+
+ if (mce_helper[0])
+ schedule_work(&mce_trigger_work);
+
+ if (__ratelimit(&ratelimit))
+ pr_info(HW_ERR "Machine check events logged\n");
+
return 1;
}
return 0;
@@ -1692,7 +1636,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
}
}
-static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
+static void mce_start_timer(unsigned int cpu, struct timer_list *t)
{
unsigned long iv = mce_adjust_timer(check_interval * HZ);
@@ -1701,17 +1645,16 @@ static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
if (mca_cfg.ignore_ce || !iv)
return;
- hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
- 0, HRTIMER_MODE_REL_PINNED);
+ t->expires = round_jiffies(jiffies + iv);
+ add_timer_on(t, smp_processor_id());
}
static void __mcheck_cpu_init_timer(void)
{
- struct hrtimer *t = &__get_cpu_var(mce_timer);
+ struct timer_list *t = &__get_cpu_var(mce_timer);
unsigned int cpu = smp_processor_id();
- hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- t->function = mce_timer_fn;
+ setup_timer(t, mce_timer_fn, cpu);
mce_start_timer(cpu, t);
}
@@ -2386,8 +2329,6 @@ static void mce_disable_cpu(void *h)
if (!mce_available(__this_cpu_ptr(&cpu_info)))
return;
- hrtimer_cancel(&__get_cpu_var(mce_timer));
-
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
for (i = 0; i < mca_cfg.banks; i++) {
@@ -2414,7 +2355,6 @@ static void mce_reenable_cpu(void *h)
if (b->init)
wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
}
- __mcheck_cpu_init_timer();
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
@@ -2422,6 +2362,7 @@ static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ struct timer_list *t = &per_cpu(mce_timer, cpu);
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
@@ -2437,9 +2378,11 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+ del_timer_sync(t);
break;
case CPU_DOWN_FAILED:
smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+ mce_start_timer(cpu, t);
break;
}
@@ -2501,8 +2444,6 @@ static __init int mcheck_init_device(void)
/* register character device /dev/mcelog */
misc_register(&mce_chrdev_device);
- err = mce_notify_work_init();
-
return err;
}
device_initcall_sync(mcheck_init_device);
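
The mce.c hunks convert the poll machinery from an hrtimer back to a classic per-CPU timer_list pinned with add_timer_on(). The self-rearming shape, sketched against the 3.12-era timer API (names are illustrative):

	static DEFINE_PER_CPU(struct timer_list, poll_timer);

	static void poll_timer_fn(unsigned long data)
	{
		struct timer_list *t = &__get_cpu_var(poll_timer);

		/* ... periodic per-CPU work ... */

		t->expires = jiffies + HZ;           /* rearm */
		add_timer_on(t, smp_processor_id()); /* stay on this CPU */
	}

	/* per-CPU setup: setup_timer(t, poll_timer_fn, cpu); then arm */

As the retained comment notes, del_timer_sync() (like hrtimer_cancel() before it) must not be called from IRQ context, where waiting for the running handler could deadlock.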
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index b3cd3eb..7eb30af 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -203,18 +203,15 @@ static void __init intel_remapping_check(int num, int slot, int func)
revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
/*
- * Revision 13 of all triggering devices id in this quirk have
- * a problem draining interrupts when irq remapping is enabled,
- * and should be flagged as broken. Additionally revisions 0x12
- * and 0x22 of device id 0x3405 has this problem.
+	 * Revisions <= 0x13 of all device ids triggering this quirk
+	 * have a problem draining interrupts when irq remapping is
+	 * enabled, and should be flagged as broken. Additionally,
+	 * revision 0x22 of device id 0x3405 has this problem.
*/
- if (revision == 0x13)
+ if (revision <= 0x13)
set_irq_remapping_broken();
- else if ((device == 0x3405) &&
- ((revision == 0x12) ||
- (revision == 0x22)))
+ else if (device == 0x3405 && revision == 0x22)
set_irq_remapping_broken();
-
}
/*
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index e491bfd..15a569a 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -364,22 +364,14 @@ ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
jnz restore_all
+need_resched:
movl TI_flags(%ebp), %ecx # need_resched set ?
testb $_TIF_NEED_RESCHED, %cl
- jnz 1f
-
- cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
- jnz restore_all
- testl $_TIF_NEED_RESCHED_LAZY, %ecx
jz restore_all
-
-1: testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
+ testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
call preempt_schedule_irq
- movl TI_flags(%ebp), %ecx # need_resched set ?
- testl $_TIF_NEED_RESCHED_MASK, %ecx
- jnz 1b
- jmp restore_all
+ jmp need_resched
END(resume_kernel)
#endif
CFI_ENDPROC
@@ -615,7 +607,7 @@ ENDPROC(system_call)
ALIGN
RING0_PTREGS_FRAME # can't unwind into user space anyway
work_pending:
- testl $_TIF_NEED_RESCHED_MASK, %ecx
+ testb $_TIF_NEED_RESCHED, %cl
jz work_notifysig
work_resched:
call schedule
@@ -628,7 +620,7 @@ work_resched:
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
jz restore_all
- testl $_TIF_NEED_RESCHED_MASK, %ecx
+ testb $_TIF_NEED_RESCHED, %cl
jnz work_resched
work_notifysig: # deal with pending signals and
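
The resume_kernel rework above drops the lazy-preemption bookkeeping and restores the mainline loop: after preempt_schedule_irq() returns, jump back to need_resched and re-test the flag, falling through to restore_all only once it stays clear. Roughly, in C (the saved-EFLAGS test is paraphrased):

	while (test_thread_flag(TIF_NEED_RESCHED)) {
		if (!(regs->flags & X86_EFLAGS_IF))
			break;          /* exception path: irqs were off */
		preempt_schedule_irq();
	}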
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index abca4f4..9ce2567 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -658,8 +658,8 @@ sysret_check:
/* Handle reschedules */
/* edx: work, edi: workmask */
sysret_careful:
- testl $_TIF_NEED_RESCHED_MASK,%edx
- jz sysret_signal
+ bt $TIF_NEED_RESCHED,%edx
+ jnc sysret_signal
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
@@ -771,8 +771,8 @@ GLOBAL(int_with_check)
/* First do a reschedule test. */
/* edx: work, edi: workmask */
int_careful:
- testl $_TIF_NEED_RESCHED_MASK,%edx
- jz int_very_careful
+ bt $TIF_NEED_RESCHED,%edx
+ jnc int_very_careful
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
@@ -1071,8 +1071,8 @@ bad_iret:
/* edi: workmask, edx: work */
retint_careful:
CFI_RESTORE_STATE
- testl $_TIF_NEED_RESCHED_MASK,%edx
- jz retint_signal
+ bt $TIF_NEED_RESCHED,%edx
+ jnc retint_signal
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
@@ -1105,15 +1105,9 @@ retint_signal:
ENTRY(retint_kernel)
cmpl $0,TI_preempt_count(%rcx)
jnz retint_restore_args
- bt $TIF_NEED_RESCHED,TI_flags(%rcx)
- jc 1f
-
- cmpl $0,TI_preempt_lazy_count(%rcx)
- jnz retint_restore_args
- bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
+ bt $TIF_NEED_RESCHED,TI_flags(%rcx)
jnc retint_restore_args
-
-1: bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
+ bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
jnc retint_restore_args
call preempt_schedule_irq
jmp exit_intr
@@ -1347,7 +1341,6 @@ bad_gs:
jmp 2b
.previous
-#ifndef CONFIG_PREEMPT_RT_FULL
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
CFI_STARTPROC
@@ -1367,7 +1360,6 @@ ENTRY(call_softirq)
ret
CFI_ENDPROC
END(call_softirq)
-#endif
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
@@ -1537,7 +1529,7 @@ paranoid_userspace:
movq %rsp,%rdi /* &pt_regs */
call sync_regs
movq %rax,%rsp /* switch stack for scheduling */
- testl $_TIF_NEED_RESCHED_MASK,%ebx
+ testl $_TIF_NEED_RESCHED,%ebx
jnz paranoid_schedule
movl %ebx,%edx /* arg3: thread flags */
TRACE_IRQS_ON
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 9da1bc7..4186755 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -149,7 +149,6 @@ void irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}
-#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void)
{
unsigned long flags;
@@ -180,7 +179,6 @@ asmlinkage void do_softirq(void)
local_irq_restore(flags);
}
-#endif
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 831f247..d04d3ec 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -88,7 +88,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
return true;
}
-#ifndef CONFIG_PREEMPT_RT_FULL
+
extern void call_softirq(void);
asmlinkage void do_softirq(void)
@@ -108,4 +108,3 @@ asmlinkage void do_softirq(void)
}
local_irq_restore(flags);
}
-#endif
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index 3d21f7b..1de84e3 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -38,7 +38,6 @@ __visible void smp_trace_irq_work_interrupt(struct pt_regs *regs)
exiting_irq();
}
-#ifndef CONFIG_PREEMPT_RT_FULL
void arch_irq_work_raise(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
@@ -49,4 +48,3 @@ void arch_irq_work_raise(void)
apic_wait_icr_idle();
#endif
}
-#endif
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 6ce0537..884f98f 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -36,7 +36,6 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
-#include <linux/highmem.h>
#include <asm/pgtable.h>
#include <asm/ldt.h>
@@ -220,35 +219,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
}
EXPORT_SYMBOL_GPL(start_thread);
-#ifdef CONFIG_PREEMPT_RT_FULL
-static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-{
- int i;
-
- /*
- * Clear @prev's kmap_atomic mappings
- */
- for (i = 0; i < prev_p->kmap_idx; i++) {
- int idx = i + KM_TYPE_NR * smp_processor_id();
- pte_t *ptep = kmap_pte - idx;
-
- kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
- }
- /*
- * Restore @next_p's kmap_atomic mappings
- */
- for (i = 0; i < next_p->kmap_idx; i++) {
- int idx = i + KM_TYPE_NR * smp_processor_id();
-
- if (!pte_none(next_p->kmap_pte[i]))
- set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
- }
-}
-#else
-static inline void
-switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
-#endif
-
/*
* switch_to(x,y) should switch tasks from x to y.
@@ -328,8 +298,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
__switch_to_xtra(prev_p, next_p, tss);
- switch_kmaps(prev_p, next_p);
-
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 52dbf1e..ff898bb 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -571,3 +571,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
quirk_amd_nb_node);
#endif
+
+#ifdef CONFIG_PCI
+/*
+ * The processor does not ensure that DRAM scrub read/write sequences
+ * are atomic wrt accesses to the CC6 save state area, so if a
+ * concurrent scrub read/write access hits the same address, the
+ * entry may appear as if it were not written. This quirk
+ * applies to Fam16h models 00h-0Fh
+ *
+ * See "Revision Guide" for AMD F16h models 00h-0fh,
+ * document 51810 rev. 3.04, Nov 2013
+ */
+static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
+{
+ u32 val;
+
+ /*
+ * Suggested workaround:
+ * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
+ */
+ pci_read_config_dword(dev, 0x58, &val);
+ if (val & 0x1F) {
+ val &= ~(0x1F);
+ pci_write_config_dword(dev, 0x58, val);
+ }
+
+ pci_read_config_dword(dev, 0x5C, &val);
+ if (val & BIT(0)) {
+ val &= ~BIT(0);
+ pci_write_config_dword(dev, 0x5c, val);
+ }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
+ amd_disable_seq_and_redirect_scrub);
+
+#endif
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index ecfe089..9e5de68 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -739,14 +739,6 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
mce_notify_process();
#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
-#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
- if (unlikely(current->forced_info.si_signo)) {
- struct task_struct *t = current;
- force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
- t->forced_info.si_signo = 0;
- }
-#endif
-
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 6663bb5..8c8093b 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -86,21 +86,9 @@ static inline void conditional_sti(struct pt_regs *regs)
local_irq_enable();
}
-static inline void conditional_sti_ist(struct pt_regs *regs)
+static inline void preempt_conditional_sti(struct pt_regs *regs)
{
-#ifdef CONFIG_X86_64
- /*
- * X86_64 uses a per CPU stack on the IST for certain traps
- * like int3. The task can not be preempted when using one
- * of these stacks, thus preemption must be disabled, otherwise
- * the stack can be corrupted if the task is scheduled out,
- * and another task comes in and uses this stack.
- *
- * On x86_32 the task keeps its own stack and it is OK if the
- * task schedules out.
- */
inc_preempt_count();
-#endif
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
@@ -111,13 +99,11 @@ static inline void conditional_cli(struct pt_regs *regs)
local_irq_disable();
}
-static inline void conditional_cli_ist(struct pt_regs *regs)
+static inline void preempt_conditional_cli(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
local_irq_disable();
-#ifdef CONFIG_X86_64
dec_preempt_count();
-#endif
}
static int __kprobes
@@ -250,9 +236,9 @@ dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
prev_state = exception_enter();
if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
- conditional_sti_ist(regs);
+ preempt_conditional_sti(regs);
do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
- conditional_cli_ist(regs);
+ preempt_conditional_cli(regs);
}
exception_exit(prev_state);
}
@@ -361,9 +347,9 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
* as we may switch to the interrupt stack.
*/
debug_stack_usage_inc();
- conditional_sti_ist(regs);
+ preempt_conditional_sti(regs);
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
- conditional_cli_ist(regs);
+ preempt_conditional_cli(regs);
debug_stack_usage_dec();
exit:
exception_exit(prev_state);
@@ -469,12 +455,12 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
debug_stack_usage_inc();
/* It's safe to allow irq's after DR6 has been saved */
- conditional_sti_ist(regs);
+ preempt_conditional_sti(regs);
if (regs->flags & X86_VM_MASK) {
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
X86_TRAP_DB);
- conditional_cli_ist(regs);
+ preempt_conditional_cli(regs);
debug_stack_usage_dec();
goto exit;
}
@@ -494,7 +480,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
si_code = get_si_code(tsk->thread.debugreg6);
if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
send_sigtrap(tsk, regs, error_code, si_code);
- conditional_cli_ist(regs);
+ preempt_conditional_cli(regs);
debug_stack_usage_dec();
exit:
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dce0df8..74dd129 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2664,6 +2664,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
int emulate = 0;
gfn_t pseudo_gfn;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return 0;
+
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
if (iterator.level == level) {
mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
@@ -2834,6 +2837,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
bool ret = false;
u64 spte = 0ull;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return false;
+
if (!page_fault_can_be_fast(error_code))
return false;
@@ -3229,6 +3235,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
struct kvm_shadow_walk_iterator iterator;
u64 spte = 0ull;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return spte;
+
walk_shadow_page_lockless_begin(vcpu);
for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
if (!is_shadow_present_pte(spte))
@@ -4557,6 +4566,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
u64 spte;
int nr_sptes = 0;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return nr_sptes;
+
walk_shadow_page_lockless_begin(vcpu);
for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
sptes[iterator.level-1] = spte;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ad75d77..cba218a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -569,6 +569,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
if (FNAME(gpte_changed)(vcpu, gw, top_level))
goto out_gpte_changed;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ goto out_gpte_changed;
+
for (shadow_walk_init(&it, vcpu, addr);
shadow_walk_okay(&it) && it.level > gw->level;
shadow_walk_next(&it)) {
@@ -820,6 +823,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
*/
mmu_topup_memory_caches(vcpu);
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+ WARN_ON(1);
+ return;
+ }
+
spin_lock(&vcpu->kvm->mmu_lock);
for_each_shadow_entry(vcpu, gva, iterator) {
level = iterator.level;
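
Each KVM hunk above inserts the same guard ahead of a shadow-table walk: if the MMU root is no longer valid (for instance mid-teardown), return that walker's neutral value instead of dereferencing a stale root. A hedged sketch of the shape:

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return 0;       /* or false / 0ull / "no sptes", per walker */

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		/* only reached with a valid root */
	}
	walk_shadow_page_lockless_end(vcpu);

The invlpg path differs: an invalid root there is unexpected, so it WARNs and returns rather than walking.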
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6128914..59181e6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7294,8 +7294,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
free_vpid(vmx);
- free_nested(vmx);
free_loaded_vmcs(vmx->loaded_vmcs);
+ free_nested(vmx);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e1b7b17..92af83d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5493,13 +5493,6 @@ int kvm_arch_init(void *opaque)
goto out;
}
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
- printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
- return -EOPNOTSUPP;
- }
-#endif
-
r = kvm_mmu_module_init();
if (r)
goto out_free_percpu;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b5c8e37..5b90bbcad9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1098,7 +1098,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
- if (unlikely(!mm || pagefault_disabled())) {
+ if (unlikely(in_atomic() || !mm)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
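
This hunk (and the matching xtensa one further down) restores the mainline guard: refuse to handle the fault when in atomic context or without an mm, since taking mmap_sem could sleep. Under this scheme, code that touches user memory from atomic context brackets the access so the handler bails cleanly; a hedged usage sketch with 3.12-era API:

	pagefault_disable();            /* folds into preempt_count, so
	                                 * in_atomic() is true below */
	ret = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

Mainline's pagefault_disable() bumps preempt_count, which is why in_atomic() suffices here; RT kept a separate pagefault_disabled() counter, removed by this revert.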
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 7f96844..4500142 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -32,7 +32,6 @@ EXPORT_SYMBOL(kunmap);
*/
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
- pte_t pte = mk_pte(page, prot);
unsigned long vaddr;
int idx, type;
@@ -46,10 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = pte;
-#endif
- set_pte(kmap_pte-idx, pte);
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
arch_flush_lazy_mmu_mode();
return (void *)vaddr;
@@ -92,9 +88,6 @@ void __kunmap_atomic(void *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = __pte(0);
-#endif
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
arch_flush_lazy_mmu_mode();
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 62377d6..7b179b4 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -56,7 +56,6 @@ EXPORT_SYMBOL_GPL(iomap_free);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
- pte_t pte = pfn_pte(pfn, prot);
unsigned long vaddr;
int idx, type;
@@ -65,12 +64,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- WARN_ON(!pte_none(*(kmap_pte - idx)));
-
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = pte;
-#endif
- set_pte(kmap_pte - idx, pte);
+ set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
arch_flush_lazy_mmu_mode();
return (void *)vaddr;
@@ -116,9 +110,6 @@ iounmap_atomic(void __iomem *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
-#ifdef CONFIG_PREEMPT_RT_FULL
- current->kmap_pte[type] = __pte(0);
-#endif
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
}
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 266ca91..5ecf651 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -42,15 +42,25 @@ static __init inline int srat_disabled(void)
return acpi_numa < 0;
}
-/* Callback for SLIT parsing */
+/*
+ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them. I/O localities are
+ * not supported at this point.
+ */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
int i, j;
- for (i = 0; i < slit->locality_count; i++)
- for (j = 0; j < slit->locality_count; j++)
+ for (i = 0; i < slit->locality_count; i++) {
+ if (pxm_to_node(i) == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < slit->locality_count; j++) {
+ if (pxm_to_node(j) == NUMA_NO_NODE)
+ continue;
numa_set_distance(pxm_to_node(i), pxm_to_node(j),
slit->entry[slit->locality_count * i + j]);
+ }
+ }
}
/* Callback for Proximity Domain -> x2APIC mapping */
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 877b9a1..0149575 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh:
push %r9; \
push SKBDATA; \
/* rsi already has offset */ \
- mov $SIZE,%ecx; /* size */ \
+ mov $SIZE,%edx; /* size */ \
call bpf_internal_load_pointer_neg_helper; \
test %rax,%rax; \
pop SKBDATA; \
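
The one-register fix above is a SysV AMD64 calling-convention bug: the third integer argument travels in %rdx/%edx, not %ecx (which carries the fourth). The callee, as declared in net/core/filter.c of this era (quoted from memory, so treat as a sketch):

	void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
						   int k, unsigned int size);
	/* skb -> %rdi, k -> %rsi (already loaded), size -> %rdx */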
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d5af43a..fdc3ba2 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
- if (pteval_present(val)) {
+ if (val & _PAGE_PRESENT) {
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
- if (pteval_present(val)) {
+ if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index ff5c9c7..70fa7bc 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
/* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (!mm || pagefault_disabled()) {
+ if (in_atomic() || !mm) {
bad_page_fault(regs, address, SIGSEGV);
return;
}
diff --git a/block/blk-core.c b/block/blk-core.c
index f703f97..fce4b93 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -232,7 +232,7 @@ EXPORT_SYMBOL(blk_delay_queue);
**/
void blk_start_queue(struct request_queue *q)
{
- WARN_ON_NONRT(!irqs_disabled());
+ WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
@@ -2925,7 +2925,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
blk_run_queue_async(q);
else
__blk_run_queue(q);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock(q->queue_lock);
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -2973,6 +2973,7 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
+ unsigned long flags;
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
@@ -2990,6 +2991,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
q = NULL;
depth = 0;
+ /*
+ * Save and disable interrupts here, to avoid doing it for every
+ * queue lock we have to take.
+ */
+ local_irq_save(flags);
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
@@ -3002,7 +3008,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
- spin_lock_irq(q->queue_lock);
+ spin_lock(q->queue_lock);
}
/*
@@ -3029,6 +3035,8 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
*/
if (q)
queue_unplugged(q, depth, from_schedule);
+
+ local_irq_restore(flags);
}
void blk_finish_plug(struct blk_plug *plug)
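
blk_flush_plug_list() again disables interrupts once around the whole flush and then takes each queue lock with plain spin_lock(); queue_unplugged() matches by unlocking without touching the interrupt state. The shape, as a hedged sketch:

	unsigned long flags;

	local_irq_save(flags);                  /* once, up front */
	while (!list_empty(&list)) {
		/* ... pick next request, switching queues as needed ... */
		spin_lock(q->queue_lock);       /* irqs already off */
		/* ... add request to the queue ... */
		spin_unlock(q->queue_lock);
	}
	local_irq_restore(flags);               /* once, at the end */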
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 899d3e4..46cd7bd 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -8,7 +8,6 @@
#include <linux/blkdev.h>
#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
#include <linux/slab.h>
-#include <linux/delay.h>
#include "blk.h"
@@ -111,7 +110,7 @@ static void ioc_release_fn(struct work_struct *work)
spin_unlock(q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_chill();
+ cpu_relax();
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
}
}
@@ -189,7 +188,7 @@ retry:
spin_unlock(icq->q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_chill();
+ cpu_relax();
goto retry;
}
}
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 15b4f08..4b8d9b54 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -38,7 +38,6 @@ void blk_iopoll_sched(struct blk_iopoll *iop)
list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
EXPORT_SYMBOL(blk_iopoll_sched);
@@ -136,7 +135,6 @@ static void blk_iopoll_softirq(struct softirq_action *h)
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_enable();
- preempt_check_resched_rt();
}
/**
@@ -206,7 +204,6 @@ static int blk_iopoll_cpu_notify(struct notifier_block *self,
&__get_cpu_var(blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_enable();
- preempt_check_resched_rt();
}
return NOTIFY_OK;
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 814b3db..ec9e606 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -51,7 +51,6 @@ static void trigger_softirq(void *data)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
/*
@@ -94,7 +93,6 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
&__get_cpu_var(blk_cpu_done));
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_enable();
- preempt_check_resched_rt();
}
return NOTIFY_OK;
@@ -152,7 +150,6 @@ do_local:
goto do_local;
local_irq_restore(flags);
- preempt_check_resched_rt();
}
/**
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 5013cad..7a1ae87 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -684,13 +684,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
int crypto_register_notifier(struct notifier_block *nb)
{
- return srcu_notifier_chain_register(&crypto_chain, nb);
+ return blocking_notifier_chain_register(&crypto_chain, nb);
}
EXPORT_SYMBOL_GPL(crypto_register_notifier);
int crypto_unregister_notifier(struct notifier_block *nb)
{
- return srcu_notifier_chain_unregister(&crypto_chain, nb);
+ return blocking_notifier_chain_unregister(&crypto_chain, nb);
}
EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
diff --git a/crypto/api.c b/crypto/api.c
index 9d68122..a2b39c5 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);
-SRCU_NOTIFIER_HEAD(crypto_chain);
+BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
@@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long val, void *v)
{
int ok;
- ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ ok = blocking_notifier_call_chain(&crypto_chain, val, v);
if (ok == NOTIFY_DONE) {
request_module("cryptomgr");
- ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ ok = blocking_notifier_call_chain(&crypto_chain, val, v);
}
return ok;
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index f86dc5f..d9351ee 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -9,6 +9,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#include <linux/time.h>
#include <crypto/public_key.h>
struct x509_certificate {
diff --git a/crypto/internal.h b/crypto/internal.h
index a5db167..bd39bfc 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -48,7 +48,7 @@ struct crypto_larval {
extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
-extern struct srcu_notifier_head crypto_chain;
+extern struct blocking_notifier_head crypto_chain;
#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
@@ -142,7 +142,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg)
static inline void crypto_notify(unsigned long val, void *v)
{
- srcu_notifier_call_chain(&crypto_chain, val, v);
+ blocking_notifier_call_chain(&crypto_chain, val, v);
}
#endif /* _CRYPTO_INTERNAL_H */
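
These crypto hunks return the notifier chain from SRCU to a blocking chain. Blocking chains run their callbacks in process context under an rwsem, so callbacks may sleep; a hedged usage sketch with illustrative names:

	static BLOCKING_NOTIFIER_HEAD(example_chain);

	static int example_cb(struct notifier_block *nb, unsigned long val,
			      void *data)
	{
		/* may sleep here */
		return NOTIFY_DONE;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_cb,
	};

	/* blocking_notifier_chain_register(&example_chain, &example_nb);
	 * blocking_notifier_call_chain(&example_chain, 0, NULL); */

As crypto_probing_notify() shows above, NOTIFY_DONE from the chain signals that nobody handled the event, prompting the cryptomgr module load and a retry.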
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 411f68d..90e846f 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -235,7 +235,7 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
* interrupt level
*/
ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
-ACPI_EXTERN acpi_raw_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
+ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock;
/* Mutex for _OSI support */
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 221c567..8d2e866 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -269,14 +269,14 @@ acpi_status acpi_hw_clear_acpi_status(void)
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
- raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
/* Clear the fixed events in PM1 A/B */
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
ACPI_BITMASK_ALL_FIXED_STATUS);
- raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
if (ACPI_FAILURE(status))
goto exit;
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 4c4b0cc..5ee7a81 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -365,7 +365,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
/*
* At this point, we know that the parent register is one of the
@@ -426,7 +426,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
unlock_and_exit:
- raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 2f4ed8b..08c3232 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(void)
return_ACPI_STATUS (status);
}
- status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
+ status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
@@ -141,7 +141,7 @@ void acpi_ut_mutex_terminate(void)
/* Delete the spinlocks */
acpi_os_delete_lock(acpi_gbl_gpe_lock);
- acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
+ acpi_os_delete_lock(acpi_gbl_hardware_lock);
acpi_os_delete_lock(acpi_gbl_reference_count_lock);
/* Delete the reader/writer lock */
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index a557738..3aa89eb 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -302,6 +302,10 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
input_sync(input);
pm_wakeup_event(&device->dev, 0);
+ acpi_bus_generate_netlink_event(
+ device->pnp.device_class,
+ dev_name(&device->dev),
+ event, ++button->pushed);
}
break;
default:
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 3cc0b92..51b7008 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -213,13 +213,13 @@ unlock:
spin_unlock_irqrestore(&ec->lock, flags);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- return acpi_ec_sync_query(ec);
+ return acpi_ec_sync_query(ec, NULL);
}
return 0;
}
@@ -471,10 +471,8 @@ acpi_handle ec_get_handle(void)
EXPORT_SYMBOL(ec_get_handle);
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
-
/*
- * Clears stale _Q events that might have accumulated in the EC.
+ * Process _Q events that might have accumulated in the EC.
* Run with locked ec mutex.
*/
static void acpi_ec_clear(struct acpi_ec *ec)
@@ -483,7 +481,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
u8 value = 0;
for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
- status = acpi_ec_query_unlocked(ec, &value);
+ status = acpi_ec_sync_query(ec, &value);
if (status || !value)
break;
}
@@ -610,13 +608,18 @@ static void acpi_ec_run(void *cxt)
kfree(handler);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
{
u8 value = 0;
int status;
struct acpi_ec_query_handler *handler, *copy;
- if ((status = acpi_ec_query_unlocked(ec, &value)))
+
+ status = acpi_ec_query_unlocked(ec, &value);
+ if (data)
+ *data = value;
+ if (status)
return status;
+
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
/* have custom handler for this bit */
@@ -639,7 +642,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
if (!ec)
return;
mutex_lock(&ec->mutex);
- acpi_ec_sync_query(ec);
+ acpi_ec_sync_query(ec, NULL);
mutex_unlock(&ec->mutex);
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 99e5158..c09e6f6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -675,11 +675,8 @@ static void acpi_hibernation_leave(void)
/* Reprogram control registers */
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
/* Check the hardware signature */
- if (facs && s4_hardware_signature != facs->hardware_signature) {
- printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
- "cannot resume!\n");
- panic("ACPI S4 hardware signature mismatch");
- }
+ if (facs && s4_hardware_signature != facs->hardware_signature)
+ pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n");
/* Restore the NVS memory area */
suspend_nvs_restore();
/* Allow EC transactions to happen. */
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2afbd46..b603720 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
unsigned long flags;
unsigned int consumed;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return consumed;
}
@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
unsigned long flags;
/* FIXME: use a bounce buffer */
- local_irq_save_nort(flags);
+ local_irq_save(flags);
buf = kmap_atomic(page);
/* do the actual data transfer */
@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
do_write);
kunmap_atomic(buf);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
} else {
buf = page_address(page);
ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
@@ -864,7 +864,7 @@ next_sg:
unsigned long flags;
/* FIXME: use bounce buffer */
- local_irq_save_nort(flags);
+ local_irq_save(flags);
buf = kmap_atomic(page);
/* do the actual data transfer */
@@ -872,7 +872,7 @@ next_sg:
count, rw);
kunmap_atomic(buf);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
} else {
buf = page_address(page);
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d251543..8fb2953 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -905,7 +905,7 @@ bio_pageinc(struct bio *bio)
/* Non-zero page count for non-head members of
* compound pages is no longer allowed by the kernel.
*/
- page = compound_trans_head(bv->bv_page);
+ page = compound_head(bv->bv_page);
atomic_inc(&page->_count);
}
}
@@ -918,7 +918,7 @@ bio_pagedec(struct bio *bio)
int i;
bio_for_each_segment(bv, bio, i) {
- page = compound_trans_head(bv->bv_page);
+ page = compound_head(bv->bv_page);
atomic_dec(&page->_count);
}
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 04ceb7e..690011d 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3691,9 +3691,12 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (!(mode & FMODE_NDELAY)) {
if (mode & (FMODE_READ|FMODE_WRITE)) {
UDRS->last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
check_disk_change(bdev);
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
@@ -3746,17 +3749,29 @@ static unsigned int floppy_check_events(struct gendisk *disk,
* a disk in the drive, and whether that disk is writable.
*/
-static void floppy_rb0_complete(struct bio *bio, int err)
+struct rb0_cbdata {
+ int drive;
+ struct completion complete;
+};
+
+static void floppy_rb0_cb(struct bio *bio, int err)
{
- complete((struct completion *)bio->bi_private);
+ struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
+ int drive = cbdata->drive;
+
+ if (err) {
+ pr_info("floppy: error %d while reading block 0", err);
+ set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ }
+ complete(&cbdata->complete);
}
-static int __floppy_read_block_0(struct block_device *bdev)
+static int __floppy_read_block_0(struct block_device *bdev, int drive)
{
struct bio bio;
struct bio_vec bio_vec;
- struct completion complete;
struct page *page;
+ struct rb0_cbdata cbdata;
size_t size;
page = alloc_page(GFP_NOIO);
@@ -3769,6 +3784,8 @@ static int __floppy_read_block_0(struct block_device *bdev)
if (!size)
size = 1024;
+ cbdata.drive = drive;
+
bio_init(&bio);
bio.bi_io_vec = &bio_vec;
bio_vec.bv_page = page;
@@ -3779,13 +3796,14 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio.bi_bdev = bdev;
bio.bi_sector = 0;
bio.bi_flags = (1 << BIO_QUIET);
- init_completion(&complete);
- bio.bi_private = &complete;
- bio.bi_end_io = floppy_rb0_complete;
+ bio.bi_private = &cbdata;
+ bio.bi_end_io = floppy_rb0_cb;
submit_bio(READ, &bio);
process_fd_request();
- wait_for_completion(&complete);
+
+ init_completion(&cbdata.complete);
+ wait_for_completion(&cbdata.complete);
__free_page(page);
@@ -3827,7 +3845,7 @@ static int floppy_revalidate(struct gendisk *disk)
UDRS->generation++;
if (drive_no_geom(drive)) {
/* auto-sensing */
- res = __floppy_read_block_0(opened_bdev[drive]);
+ res = __floppy_read_block_0(opened_bdev[drive], drive);
} else {
if (cf)
poll_drive(false, FD_RAW_NEED_DISK);
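
The new end_io callback carries a small context struct so the completion handler can both record the failure and wake the opener; floppy_open() then refuses the open when FD_OPEN_SHOULD_FAIL_BIT is set. A condensed sketch of the pattern against the 3.12-era bio API follows (page/bvec setup trimmed, and mark_drive_bad() is a hypothetical stand-in for the UDRS flag update; not buildable as-is):

#include <linux/bio.h>
#include <linux/completion.h>

struct rb0_ctx {
    int drive;
    struct completion done;
};

static void rb0_endio(struct bio *bio, int err)   /* 3.12 signature */
{
    struct rb0_ctx *ctx = bio->bi_private;

    if (err)
        mark_drive_bad(ctx->drive);  /* sets FD_OPEN_SHOULD_FAIL_BIT */
    complete(&ctx->done);            /* always wake the waiter */
}

static void read_block0_sketch(struct block_device *bdev, int drive)
{
    struct rb0_ctx ctx = { .drive = drive };
    struct bio bio;

    init_completion(&ctx.done);      /* ready before I/O is in flight */
    bio_init(&bio);
    /* ... attach the bvec/page and sizing as in the hunk ... */
    bio.bi_bdev = bdev;
    bio.bi_private = &ctx;
    bio.bi_end_io = rb0_endio;

    submit_bio(READ, &bio);
    wait_for_completion(&ctx.done);
}
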
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d593c99..6e30356 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -223,6 +223,7 @@ static struct usb_device_id blacklist_table[] = {
/* Intel Bluetooth device */
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
+ { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
{ } /* Terminating entry */
};
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index a22a7a5..8156caf 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
static inline int read_all_bytes(struct si_sm_data *bt)
{
- unsigned char i;
+ unsigned int i;
/*
* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 15e4a60..e5bdd1a 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -249,6 +249,9 @@ struct smi_info {
/* The timer for this si. */
struct timer_list si_timer;
+ /* This flag is set if the timer is running (timer_pending() isn't enough) */
+ bool timer_running;
+
/* The time (in jiffies) the last timeout occurred at. */
unsigned long last_timeout_jiffies;
@@ -435,6 +438,13 @@ static void start_clear_flags(struct smi_info *smi_info)
smi_info->si_state = SI_CLEARING_FLAGS;
}
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+ smi_info->last_timeout_jiffies = jiffies;
+ mod_timer(&smi_info->si_timer, new_val);
+ smi_info->timer_running = true;
+}
+
/*
* When we have a situation where we run out of memory and cannot
* allocate messages, we just leave them in the BMC and run the system
@@ -447,8 +457,7 @@ static inline void disable_si_irq(struct smi_info *smi_info)
start_disable_irq(smi_info);
smi_info->interrupt_disabled = 1;
if (!atomic_read(&smi_info->stop_operation))
- mod_timer(&smi_info->si_timer,
- jiffies + SI_TIMEOUT_JIFFIES);
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
}
}
@@ -908,15 +917,7 @@ static void sender(void *send_info,
list_add_tail(&msg->link, &smi_info->xmit_msgs);
if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
- /*
- * last_timeout_jiffies is updated here to avoid
- * smi_timeout() handler passing very large time_diff
- * value to smi_event_handler() that causes
- * the send command to abort.
- */
- smi_info->last_timeout_jiffies = jiffies;
-
- mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
if (smi_info->thread)
wake_up_process(smi_info->thread);
@@ -1005,6 +1006,17 @@ static int ipmi_thread(void *data)
spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_result = smi_event_handler(smi_info, 0);
+
+ /*
+ * If the driver is doing something, there is a possible
+ * race with the timer. If the timer handler sees idle,
+ * and the thread here sees something else, the timer
+ * handler won't restart the timer even though it is
+ * required. So start it here if necessary.
+ */
+ if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
&busy_until);
@@ -1074,10 +1086,6 @@ static void smi_timeout(unsigned long data)
* SI_USEC_PER_JIFFY);
smi_result = smi_event_handler(smi_info, time_diff);
- spin_unlock_irqrestore(&(smi_info->si_lock), flags);
-
- smi_info->last_timeout_jiffies = jiffies_now;
-
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
timeout = jiffies + SI_TIMEOUT_JIFFIES;
@@ -1099,7 +1107,10 @@ static void smi_timeout(unsigned long data)
do_mod_timer:
if (smi_result != SI_SM_IDLE)
- mod_timer(&(smi_info->si_timer), timeout);
+ smi_mod_timer(smi_info, timeout);
+ else
+ smi_info->timer_running = false;
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1147,8 +1158,7 @@ static int smi_start_processing(void *send_info,
/* Set up the timer that drives the interface. */
setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
- new_smi->last_timeout_jiffies = jiffies;
- mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+ smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/*
* Check if the user forcefully enabled the daemon.
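
Every site that previously paired mod_timer() with a manual last_timeout_jiffies update now goes through smi_mod_timer(), which also maintains the new timer_running flag; timer_pending() alone is not enough because it reads false while the timer handler itself is executing. A condensed sketch of the wrapper and the race check it enables (struct smi_info fields as in the hunk, locking omitted; kick_timer_if_needed() is a hypothetical wrapper around the thread-side check):

static void smi_mod_timer(struct smi_info *si, unsigned long expires)
{
    si->last_timeout_jiffies = jiffies;   /* keeps time_diff sane      */
    mod_timer(&si->si_timer, expires);
    si->timer_running = true;             /* handler clears when idle  */
}

/* Thread side: restart the timer if the state machine is busy but the
 * timer handler already declared itself idle (the race noted above). */
static void kick_timer_if_needed(struct smi_info *si, int smi_result)
{
    if (smi_result != SI_SM_IDLE && !si->timer_running)
        smi_mod_timer(si, jiffies + SI_TIMEOUT_JIFFIES);
}
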
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index f895a8c..d1f4675 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -100,6 +100,9 @@ static ssize_t read_mem(struct file *file, char __user *buf,
ssize_t read, sz;
char *ptr;
+ if (p != *ppos)
+ return 0;
+
if (!valid_phys_addr_range(p, count))
return -EFAULT;
read = 0;
@@ -158,6 +161,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
unsigned long copied;
void *ptr;
+ if (p != *ppos)
+ return -EFBIG;
+
if (!valid_phys_addr_range(p, count))
return -EFAULT;
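
read_mem()/write_mem() derive the physical address p from *ppos through a narrower cursor type, so on 32-bit kernels an offset at or past 4 GiB silently wraps; the new `p != *ppos` check rejects exactly that case. A runnable illustration of the truncation, with uint32_t standing in for the 32-bit cursor:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t ppos = (int64_t)1 << 32;   /* seek to 4 GiB */
    uint32_t p = (uint32_t)ppos;       /* 32-bit cursor truncates to 0 */

    if ((int64_t)p != ppos)
        printf("offset %lld unreachable: truncates to %u\n",
               (long long)ppos, (unsigned)p);
    return 0;
}
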
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ddcbcad..7a744d3 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -673,12 +673,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
preempt_disable();
/* if over the trickle threshold, use only 1 in 4096 samples */
if (input_pool.entropy_count > trickle_thresh &&
- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) {
- preempt_enable();
- return;
- }
+ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
+ goto out;
- preempt_enable();
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
@@ -719,6 +716,8 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
credit_entropy_bits(&input_pool,
min_t(int, fls(delta>>1), 11));
}
+out:
+ preempt_enable();
}
void add_input_randomness(unsigned int type, unsigned int code,
@@ -739,16 +738,18 @@ EXPORT_SYMBOL_GPL(add_input_randomness);
static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
-void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
+void add_interrupt_randomness(int irq, int irq_flags)
{
struct entropy_store *r;
struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
unsigned long now = jiffies;
__u32 input[4], cycles = random_get_entropy();
input[0] = cycles ^ jiffies;
input[1] = irq;
- if (ip) {
+ if (regs) {
+ __u64 ip = instruction_pointer(regs);
input[2] = ip;
input[3] = ip >> 32;
}
@@ -762,11 +763,7 @@ void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
fast_pool->last = now;
r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
-#ifndef CONFIG_PREEMPT_RT_FULL
__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
-#else
- mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
-#endif
/*
* If we don't have a valid cycle counter, and we see
* back-to-back timer interrupts, then skip giving credit for
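
Two changes land in random.c: add_timer_randomness() now funnels its early return through a single `out:` label so preempt_enable() always pairs with the preempt_disable() at the top, and add_interrupt_randomness() drops its `ip` parameter in favor of reading the instruction pointer from get_irq_regs() itself. A sketch of the single-exit idiom (do_sample() is a hypothetical stand-in for the real sampling and crediting work):

static void add_timer_randomness_sketch(int over_threshold)
{
    preempt_disable();

    if (over_threshold)
        goto out;        /* early return still re-enables preemption */

    do_sample();         /* sampling and entropy crediting */
out:
    preempt_enable();
}
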
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index a00dfaf..8a61872 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -23,7 +23,8 @@
* this 32 bit free-running counter. the second channel is not used.
*
* - The third channel may be used to provide a 16-bit clockevent
- * source, used in either periodic or oneshot mode.
+ * source, used in either periodic or oneshot mode. This runs
+ * at 32 KiHz, and can handle delays of up to two seconds.
*
* A boot clocksource and clockevent source are also currently needed,
* unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
@@ -73,7 +74,6 @@ static struct clocksource clksrc = {
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
- u32 freq;
void __iomem *regs;
};
@@ -82,6 +82,13 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
+/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
+ * because using one of the divided clocks would usually mean the
+ * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
+ *
+ * A divided clock could be good for high resolution timers, since
+ * 30.5 usec resolution can seem "low".
+ */
static u32 timer_clock;
static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
@@ -104,12 +111,11 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
case CLOCK_EVT_MODE_PERIODIC:
clk_enable(tcd->clk);
- /* count up to RC, then irq and restart */
+ /* slow clock, count up to RC, then irq and restart */
__raw_writel(timer_clock
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
- __raw_writel((tcd->freq + HZ/2)/HZ,
- tcaddr + ATMEL_TC_REG(2, RC));
+ __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
/* Enable clock and interrupts on RC compare */
__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -122,7 +128,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
case CLOCK_EVT_MODE_ONESHOT:
clk_enable(tcd->clk);
- /* count up to RC, then irq and stop */
+ /* slow clock, count up to RC, then irq and stop */
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
@@ -151,12 +157,8 @@ static struct tc_clkevt_device clkevt = {
.name = "tc_clkevt",
.features = CLOCK_EVT_FEAT_PERIODIC
| CLOCK_EVT_FEAT_ONESHOT,
-#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
/* Should be lower than at91rm9200's system timer */
.rating = 125,
-#else
- .rating = 200,
-#endif
.set_next_event = tc_next_event,
.set_mode = tc_mode,
},
@@ -182,9 +184,8 @@ static struct irqaction tc_irqaction = {
.handler = ch2_irq,
};
-static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
+static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
- unsigned divisor = atmel_tc_divisors[divisor_idx];
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
@@ -192,15 +193,11 @@ static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
clkevt.clk = t2_clk;
tc_irqaction.dev_id = &clkevt;
- timer_clock = divisor_idx;
- if (!divisor)
- clkevt.freq = 32768;
- else
- clkevt.freq = clk_get_rate(t2_clk) / divisor;
+ timer_clock = clk32k_divisor_idx;
clkevt.clkevt.cpumask = cpumask_of(0);
- clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
+ clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
setup_irq(irq, &tc_irqaction);
}
@@ -325,11 +322,8 @@ static int __init tcb_clksrc_init(void)
clocksource_register_hz(&clksrc, divided_rate);
/* channel 2: periodic and oneshot timer support */
-#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
setup_clkevents(tc, clk32k_divisor_idx);
-#else
- setup_clkevents(tc, best_divisor_idx);
-#endif
+
return 0;
}
arch_initcall(tcb_clksrc_init);
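
With the slow-clock-only configuration above, the periodic RC value becomes (32768 + HZ/2) / HZ, i.e. 32768/HZ rounded to nearest, and the 16-bit counter at 32768 Hz bounds oneshot delays to 65536/32768 = 2 s, matching the updated header comment. A runnable check of the rounding for common HZ values:

#include <stdio.h>

int main(void)
{
    const unsigned hz_vals[] = { 100, 250, 300, 1000 };

    for (unsigned i = 0; i < 4; i++) {
        unsigned hz = hz_vals[i];
        unsigned rc = (32768 + hz / 2) / hz;  /* round to nearest */
        double actual = 32768.0 / rc;         /* achieved tick rate */
        printf("HZ=%-4u RC=%-4u actual tick %.2f Hz\n", hz, rc, actual);
    }
    return 0;
}
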
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index 587e020..e559095 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -54,7 +54,7 @@ static inline void pit_irq_acknowledge(void)
static unsigned int pit_read_sched_clock(void)
{
- return __raw_readl(clksrc_base + PITCVAL);
+ return ~__raw_readl(clksrc_base + PITCVAL);
}
static int __init pit_clocksource_init(unsigned long rate)
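
The PIT counts down from its load value, while sched_clock() must be monotonically increasing; returning the bitwise complement maps the descending count 0xffffffff..0 onto the ascending range 0..0xffffffff with no extra arithmetic. A runnable demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* A down-counter sweeping from 0xffffffff toward zero... */
    uint32_t down[] = { 0xffffffff, 0xfffffffe, 0x80000000, 0x00000001 };

    /* ...reads back as a monotonically increasing clock when inverted. */
    for (int i = 0; i < 4; i++)
        printf("counter %08x -> clock %08x\n",
               (unsigned)down[i], (unsigned)~down[i]);
    return 0;
}
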
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 85f1c8c..4fe6521 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -26,41 +26,108 @@
static unsigned int busfreq; /* FSB, in 10 kHz */
static unsigned int max_multiplier;
+static unsigned int param_busfreq = 0;
+static unsigned int param_max_multiplier = 0;
+
+module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
+MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
+
+module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
+MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
static struct cpufreq_frequency_table clock_ratio[] = {
- {45, /* 000 -> 4.5x */ 0},
+ {60, /* 110 -> 6.0x */ 0},
+ {55, /* 011 -> 5.5x */ 0},
{50, /* 001 -> 5.0x */ 0},
+ {45, /* 000 -> 4.5x */ 0},
{40, /* 010 -> 4.0x */ 0},
- {55, /* 011 -> 5.5x */ 0},
- {20, /* 100 -> 2.0x */ 0},
- {30, /* 101 -> 3.0x */ 0},
- {60, /* 110 -> 6.0x */ 0},
{35, /* 111 -> 3.5x */ 0},
+ {30, /* 101 -> 3.0x */ 0},
+ {20, /* 100 -> 2.0x */ 0},
{0, CPUFREQ_TABLE_END}
};
+static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
+static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
+
+static const struct {
+ unsigned freq;
+ unsigned mult;
+} usual_frequency_table[] = {
+ { 400000, 40 }, // 100 * 4
+ { 450000, 45 }, // 100 * 4.5
+ { 475000, 50 }, // 95 * 5
+ { 500000, 50 }, // 100 * 5
+ { 506250, 45 }, // 112.5 * 4.5
+ { 533500, 55 }, // 97 * 5.5
+ { 550000, 55 }, // 100 * 5.5
+ { 562500, 50 }, // 112.5 * 5
+ { 570000, 60 }, // 95 * 6
+ { 600000, 60 }, // 100 * 6
+ { 618750, 55 }, // 112.5 * 5.5
+ { 660000, 55 }, // 120 * 5.5
+ { 675000, 60 }, // 112.5 * 6
+ { 720000, 60 }, // 120 * 6
+};
+
+#define FREQ_RANGE 3000
/**
* powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
*
- * Returns the current setting of the frequency multiplier. Core clock
+ * Returns the current setting of the frequency multiplier. Core clock
* speed is frequency of the Front-Side Bus multiplied with this value.
*/
static int powernow_k6_get_cpu_multiplier(void)
{
- u64 invalue = 0;
+ unsigned long invalue = 0;
u32 msrval;
+ local_irq_disable();
+
msrval = POWERNOW_IOPORT + 0x1;
wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
invalue = inl(POWERNOW_IOPORT + 0x8);
msrval = POWERNOW_IOPORT + 0x0;
wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
- return clock_ratio[(invalue >> 5)&7].driver_data;
+ local_irq_enable();
+
+ return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data;
}
+static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
+{
+ unsigned long outvalue, invalue;
+ unsigned long msrval;
+ unsigned long cr0;
+
+ /* we now need to transform best_i to the BVC format, see AMD#23446 */
+
+ /*
+ * The processor doesn't respond to inquiry cycles while changing the
+ * frequency, so we must disable cache.
+ */
+ local_irq_disable();
+ cr0 = read_cr0();
+ write_cr0(cr0 | X86_CR0_CD);
+ wbinvd();
+
+ outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
+
+ msrval = POWERNOW_IOPORT + 0x1;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+ invalue = inl(POWERNOW_IOPORT + 0x8);
+ invalue = invalue & 0x1f;
+ outvalue = outvalue | invalue;
+ outl(outvalue, (POWERNOW_IOPORT + 0x8));
+ msrval = POWERNOW_IOPORT + 0x0;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+
+ write_cr0(cr0);
+ local_irq_enable();
+}
/**
* powernow_k6_set_state - set the PowerNow! multiplier
@@ -71,8 +138,6 @@ static int powernow_k6_get_cpu_multiplier(void)
static void powernow_k6_set_state(struct cpufreq_policy *policy,
unsigned int best_i)
{
- unsigned long outvalue = 0, invalue = 0;
- unsigned long msrval;
struct cpufreq_freqs freqs;
if (clock_ratio[best_i].driver_data > max_multiplier) {
@@ -85,18 +150,7 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- /* we now need to transform best_i to the BVC format, see AMD#23446 */
-
- outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
-
- msrval = POWERNOW_IOPORT + 0x1;
- wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
- invalue = inl(POWERNOW_IOPORT + 0x8);
- invalue = invalue & 0xf;
- outvalue = outvalue | invalue;
- outl(outvalue , (POWERNOW_IOPORT + 0x8));
- msrval = POWERNOW_IOPORT + 0x0;
- wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+ powernow_k6_set_cpu_multiplier(best_i);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
@@ -141,18 +195,57 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
return 0;
}
-
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i, f;
int result;
+ unsigned khz;
if (policy->cpu != 0)
return -ENODEV;
- /* get frequencies */
- max_multiplier = powernow_k6_get_cpu_multiplier();
- busfreq = cpu_khz / max_multiplier;
+ max_multiplier = 0;
+ khz = cpu_khz;
+ for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
+ if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
+ khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
+ khz = usual_frequency_table[i].freq;
+ max_multiplier = usual_frequency_table[i].mult;
+ break;
+ }
+ }
+ if (param_max_multiplier) {
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (clock_ratio[i].driver_data == param_max_multiplier) {
+ max_multiplier = param_max_multiplier;
+ goto have_max_multiplier;
+ }
+ }
+ printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
+ return -EINVAL;
+ }
+
+ if (!max_multiplier) {
+ printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
+ printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
+ return -EOPNOTSUPP;
+ }
+
+have_max_multiplier:
+ param_max_multiplier = max_multiplier;
+
+ if (param_busfreq) {
+ if (param_busfreq >= 50000 && param_busfreq <= 150000) {
+ busfreq = param_busfreq / 10;
+ goto have_busfreq;
+ }
+ printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
+ return -EINVAL;
+ }
+
+ busfreq = khz / max_multiplier;
+have_busfreq:
+ param_busfreq = busfreq * 10;
/* table init */
for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
@@ -164,7 +257,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
}
/* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 200000;
+ policy->cpuinfo.transition_latency = 500000;
policy->cur = busfreq * max_multiplier;
result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
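
Two details in the powernow-k6 rework are easy to misread: index_to_register[] and register_to_index[] must be inverse permutations (the table order changed, but the 3-bit BVC encodings did not), and the measured cpu_khz is snapped to the nearest usual_frequency_table[] entry within ±FREQ_RANGE kHz before the bus frequency is derived. A runnable self-check of both:

#include <stdio.h>

static const unsigned char index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
static const unsigned char register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };

int main(void)
{
    /* The two tables must undo each other for get/set to agree. */
    for (int i = 0; i < 8; i++)
        if (register_to_index[index_to_register[i]] != i)
            printf("permutation broken at %d\n", i);

    /* Snapping: 498 MHz measured -> 500 MHz nominal at multiplier 5.0
     * (stored as 50, in tenths), giving busfreq in 10 kHz units. */
    unsigned khz = 498000, nominal = 500000, mult = 50, range = 3000;
    if (khz >= nominal - range && khz <= nominal + range)
        printf("snap %u kHz -> %u kHz, busfreq %u (10 kHz units)\n",
               khz, nominal, nominal / mult);
    return 0;
}
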
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 3c9e4e9..d43a620 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1238,9 +1238,17 @@ static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
if (num_dcts_intlv == 2) {
select = (sys_addr >> 8) & 0x3;
channel = select ? 0x3 : 0;
- } else if (num_dcts_intlv == 4)
- channel = (sys_addr >> 8) & 0x7;
-
+ } else if (num_dcts_intlv == 4) {
+ u8 intlv_addr = dct_sel_interleave_addr(pvt);
+ switch (intlv_addr) {
+ case 0x4:
+ channel = (sys_addr >> 8) & 0x3;
+ break;
+ case 0x5:
+ channel = (sys_addr >> 9) & 0x3;
+ break;
+ }
+ }
return channel;
}
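
For four-way DCT interleave the channel is no longer a fixed bit field: the interleave-address mode selects which two bits of sys_addr carry the channel number (bits [9:8] for mode 0x4, bits [10:9] for mode 0x5). A runnable extraction sketch:

#include <stdint.h>
#include <stdio.h>

static unsigned channel_4way(uint64_t sys_addr, unsigned intlv_addr)
{
    switch (intlv_addr) {
    case 0x4: return (sys_addr >> 8) & 0x3;   /* bits [9:8]  */
    case 0x5: return (sys_addr >> 9) & 0x3;   /* bits [10:9] */
    }
    return 0;                                 /* unknown mode */
}

int main(void)
{
    uint64_t addr = 0x300;   /* bits 9:8 = 0b11, bit 10 = 0 */
    printf("mode 0x4 -> ch %u, mode 0x5 -> ch %u\n",
           channel_4way(addr, 0x4), channel_4way(addr, 0x5));
    return 0;
}
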
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 86d779a..32bbba0 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -233,6 +233,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = cdev->mc.vram_size;
+ info->fix.smem_start = cdev->dev->mode_config.fb_base;
+ info->fix.smem_len = cdev->mc.vram_size;
+
info->screen_base = sysram;
info->screen_size = size;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 379a47e..3592616 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -494,13 +494,12 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
int cirrus_vga_get_modes(struct drm_connector *connector)
{
- /* Just add a static list of modes */
- drm_add_modes_noedid(connector, 640, 480);
- drm_add_modes_noedid(connector, 800, 600);
- drm_add_modes_noedid(connector, 1024, 768);
- drm_add_modes_noedid(connector, 1280, 1024);
+ int count;
- return 4;
+ /* Just add a static list of modes */
+ count = drm_add_modes_noedid(connector, 1280, 1024);
+ drm_set_preferred_mode(connector, 1024, 768);
+ return count;
}
static int cirrus_vga_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d0d3eae..1cb5026 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3296,6 +3296,19 @@ int drm_add_modes_noedid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_add_modes_noedid);
+void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (drm_mode_width(mode) == hpref &&
+ drm_mode_height(mode) == vpref)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+}
+EXPORT_SYMBOL(drm_set_preferred_mode);
+
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3d13ca6e2..49557c9 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1163,6 +1163,7 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
{
struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode = NULL;
+ bool prefer_non_interlace;
cmdline_mode = &fb_helper_conn->cmdline_mode;
if (cmdline_mode->specified == false)
@@ -1174,6 +1175,8 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
if (cmdline_mode->rb || cmdline_mode->margins)
goto create_mode;
+ prefer_non_interlace = !cmdline_mode->interlace;
+ again:
list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
@@ -1188,10 +1191,18 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
if (cmdline_mode->interlace) {
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
+ } else if (prefer_non_interlace) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ continue;
}
return mode;
}
+ if (prefer_non_interlace) {
+ prefer_non_interlace = false;
+ goto again;
+ }
+
create_mode:
mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
cmdline_mode);
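
The cmdline-mode picker now makes two passes: the first skips interlaced modes when the user did not ask for interlace, and only if nothing matched does a second pass accept them. A runnable reduction of the control flow over a toy mode list:

#include <stdbool.h>
#include <stdio.h>

struct mode { int w, h; bool interlaced; };

static const struct mode *pick(const struct mode *m, int n, int w, int h)
{
    bool prefer_non_interlace = true;
again:
    for (int i = 0; i < n; i++) {
        if (m[i].w != w || m[i].h != h)
            continue;
        if (prefer_non_interlace && m[i].interlaced)
            continue;                 /* first pass: skip interlaced */
        return &m[i];
    }
    if (prefer_non_interlace) {       /* second pass: accept anything */
        prefer_non_interlace = false;
        goto again;
    }
    return NULL;
}

int main(void)
{
    struct mode modes[] = { {1920, 1080, true}, {1920, 1080, false} };
    const struct mode *m = pick(modes, 2, 1920, 1080);
    printf("picked %s\n", m->interlaced ? "interlaced" : "progressive");
    return 0;
}

The progressive entry wins even though the interlaced one comes first, which is exactly the behavior the `prefer_non_interlace` flag adds.
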
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 434ea84..f92da0a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -628,6 +628,11 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
* code gets preempted or delayed for some reason.
*/
for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+ /* Disable preemption to make it very likely to
+ * succeed in the first iteration even on PREEMPT_RT kernel.
+ */
+ preempt_disable();
+
/* Get system timestamp before query. */
stime = ktime_get();
@@ -639,6 +644,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
if (!drm_timestamp_monotonic)
mono_time_offset = ktime_get_monotonic_offset();
+ preempt_enable();
+
/* Return as no-op if scanout query unsupported or failed. */
if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aaeac32..b00b32c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4792,7 +4792,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
if (!mutex_is_locked(mutex))
return false;
-#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 979a6ea..bf34577 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1129,9 +1129,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
-#ifndef CONFIG_PREEMPT_RT_BASE
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
-#endif
i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1f7b4ca..c7fa2e4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -536,7 +536,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE,
- false);
+ true);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index fe4a7d1..c077df0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -201,6 +201,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped) {
+ DRM_INFO("DMAR active, disabling use of stolen memory\n");
+ return 0;
+ }
+#endif
+
if (dev_priv->gtt.stolen_size == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9dcf34f..5aa836e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10073,8 +10073,7 @@ static struct intel_quirk intel_quirks[] = {
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
- /* 830/845 need to leave pipe A & dpll A up */
- { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ /* 830 needs to leave pipe A & dpll A up */
{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
/* Lenovo U160 cannot use SSC on LVDS */
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8160fbd..200e856 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -66,6 +66,7 @@ bool nouveau_is_v1_dsm(void) {
#define NOUVEAU_DSM_HAS_MUX 0x1
#define NOUVEAU_DSM_HAS_OPT 0x2
+#ifdef CONFIG_VGA_SWITCHEROO
static const char nouveau_dsm_muid[] = {
0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
@@ -378,6 +379,11 @@ void nouveau_unregister_dsm_handler(void)
if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
vga_switcheroo_unregister_handler();
}
+#else
+void nouveau_register_dsm_handler(void) {}
+void nouveau_unregister_dsm_handler(void) {}
+void nouveau_switcheroo_optimus_dsm(void) {}
+#endif
/* retrieve the ROM in 4k blocks */
static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 2a2879e..bbcd2dd 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -226,13 +226,15 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev)
return !ASIC_IS_NODCE(rdev);
}
-static void dce6_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
+ if (!pin)
+ return;
+
WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
enable ? AUDIO_ENABLED : 0);
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
}
static const u32 pin_offsets[7] =
@@ -269,7 +271,8 @@ int dce6_audio_init(struct radeon_device *rdev)
rdev->audio.pin[i].connected = false;
rdev->audio.pin[i].offset = pin_offsets[i];
rdev->audio.pin[i].id = i;
- dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
+ /* disable audio. it will be set up later */
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
}
return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index b347fff..da4e504 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -257,6 +257,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ if (ASIC_IS_DCE6(rdev)) {
+ dig->afmt->pin = dce6_audio_get_pin(rdev);
+ dce6_audio_enable(rdev, dig->afmt->pin, false);
+ } else {
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+ }
+
evergreen_audio_set_dto(encoder, mode->clock);
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
@@ -358,12 +367,16 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+
+ /* enable audio after setting up hw */
+ if (ASIC_IS_DCE6(rdev))
+ dce6_audio_enable(rdev, dig->afmt->pin, true);
+ else
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -376,15 +389,6 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
- if (enable) {
- if (ASIC_IS_DCE6(rdev))
- dig->afmt->pin = dce6_audio_get_pin(rdev);
- else
- dig->afmt->pin = r600_audio_get_pin(rdev);
- } else {
- dig->afmt->pin = NULL;
- }
-
dig->afmt->enabled = enable;
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 47fc2b8..bffac10 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -142,12 +142,15 @@ void r600_audio_update_hdmi(struct work_struct *work)
}
/* enable the audio stream */
-static void r600_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
u32 value = 0;
+ if (!pin)
+ return;
+
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +161,6 @@ static void r600_audio_enable(struct radeon_device *rdev,
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
}
/*
@@ -178,8 +180,8 @@ int r600_audio_init(struct radeon_device *rdev)
rdev->audio.pin[0].status_bits = 0;
rdev->audio.pin[0].category_code = 0;
rdev->audio.pin[0].id = 0;
-
- r600_audio_enable(rdev, &rdev->audio.pin[0], true);
+ /* disable audio. it will be set up later */
+ r600_audio_enable(rdev, &rdev->audio.pin[0], false);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 7f3b0d9..d38b725 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -329,9 +329,6 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- /* XXX: setting this register causes hangs on some asics */
- return;
-
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder)
radeon_connector = to_radeon_connector(connector);
@@ -446,6 +443,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+
r600_audio_set_dto(encoder, mode->clock);
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
@@ -517,6 +518,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
r600_hdmi_audio_workaround(encoder);
+
+ /* enable audio after setting up hw */
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
/*
@@ -637,11 +641,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
- if (enable)
- dig->afmt->pin = r600_audio_get_pin(rdev);
- else
- dig->afmt->pin = NULL;
-
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f44ca58..b11433f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2717,6 +2717,12 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
void r600_audio_update_hdmi(struct work_struct *work);
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
/*
* R600 vram scratch functions
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 49f6cc0..3518053 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -574,17 +574,6 @@ int lg4ff_init(struct hid_device *hid)
if (error)
return error;
- /* Check if autocentering is available and
- * set the centering force to zero by default */
- if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
- if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */
- dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
- else
- dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
-
- dev->ff->set_autocenter(dev, 0);
- }
-
/* Get private driver data */
drv_data = hid_get_drvdata(hid);
if (!drv_data) {
@@ -605,6 +594,17 @@ int lg4ff_init(struct hid_device *hid)
entry->max_range = lg4ff_devices[i].max_range;
entry->set_range = lg4ff_devices[i].set_range;
+ /* Check if autocentering is available and
+ * set the centering force to zero by default */
+ if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
+ if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */
+ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
+ else
+ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
+
+ dev->ff->set_autocenter(dev, 0);
+ }
+
/* Create sysfs interface */
error = device_create_file(&hid->dev, &dev_attr_range);
if (error)
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 6a6dd5c..d0a0034 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -313,13 +313,13 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
hid_hw_close(hidraw->hid);
wake_up_interruptible(&hidraw->wait);
}
+ device_destroy(hidraw_class,
+ MKDEV(hidraw_major, hidraw->minor));
} else {
--hidraw->open;
}
if (!hidraw->open) {
if (!hidraw->exist) {
- device_destroy(hidraw_class,
- MKDEV(hidraw_major, hidraw->minor));
hidraw_table[hidraw->minor] = NULL;
kfree(hidraw);
} else {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index cd30d98..01892bd 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -109,6 +109,8 @@ config I2C_I801
Avoton (SOC)
Wellsburg (PCH)
Coleto Creek (PCH)
+ Wildcat Point-LP (PCH)
+ BayTrail (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b2b8aa9..c512145 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -40,7 +40,9 @@
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 4296d17..0444f7a 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -59,6 +59,8 @@
Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes
Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
Coleto Creek (PCH) 0x23b0 32 hard yes yes yes
+ Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
+ BayTrail (SOC) 0x0f12 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -160,6 +162,7 @@
STATUS_ERROR_FLAGS)
/* Older devices have their ID defined in <linux/pci_ids.h> */
+#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
@@ -177,6 +180,7 @@
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2
struct i801_mux_config {
char *gpio_chip;
@@ -819,6 +823,8 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 2f66478..9967a6f 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -879,12 +879,15 @@ omap_i2c_isr(int irq, void *dev_id)
u16 mask;
u16 stat;
- stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+ spin_lock(&dev->lock);
mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
+ stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
if (stat & mask)
ret = IRQ_WAKE_THREAD;
+ spin_unlock(&dev->lock);
+
return ret;
}
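
The hard IRQ handler now takes dev->lock and reads the interrupt-enable mask before the status register, so any status bit it acts on is guaranteed to have been enabled at the moment it was sampled and cannot race with the threaded handler masking or clearing bits in between. A condensed sketch of the fixed ordering (register helpers as in the hunk; not a complete handler):

static irqreturn_t omap_i2c_isr_sketch(struct omap_i2c_dev *dev)
{
    irqreturn_t ret = IRQ_HANDLED;
    u16 mask, stat;

    spin_lock(&dev->lock);                             /* vs. thread   */
    mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);    /* enabled IRQs */
    stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);  /* then status  */

    if (stat & mask)                    /* wake only for enabled events */
        ret = IRQ_WAKE_THREAD;
    spin_unlock(&dev->lock);

    return ret;
}
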
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 394f142f..36f76e2 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
- local_irq_save_nort(flags);
+ local_irq_save(flags);
if (m5229_revision < 0xC2) {
/*
@@ -325,7 +325,7 @@ out:
}
pci_dev_put(north);
pci_dev_put(isa_dev);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return 0;
}
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 0d0a966..696b6c1 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
dma_old = inb(base + 2);
- local_irq_save_nort(flags);
+ local_irq_save(flags);
dma_new = dma_old;
pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
@@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
if (dma_new != dma_old)
outb(dma_new, base + 2);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
hwif->name, base, base + 7);
diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
index 4169433..1976397 100644
--- a/drivers/ide/ide-io-std.c
+++ b/drivers/ide/ide-io-std.c
@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
unsigned long uninitialized_var(flags);
if ((io_32bit & 2) && !mmio) {
- local_irq_save_nort(flags);
+ local_irq_save(flags);
ata_vlb_sync(io_ports->nsect_addr);
}
@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
insl(data_addr, buf, words);
if ((io_32bit & 2) && !mmio)
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
if (((len + 1) & 3) < 2)
return;
@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
unsigned long uninitialized_var(flags);
if ((io_32bit & 2) && !mmio) {
- local_irq_save_nort(flags);
+ local_irq_save(flags);
ata_vlb_sync(io_ports->nsect_addr);
}
@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
outsl(data_addr, buf, words);
if ((io_32bit & 2) && !mmio)
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
if (((len + 1) & 3) < 2)
return;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 079ae6b..177db6d 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long data)
/* disable_irq_nosync ?? */
disable_irq(hwif->irq);
/* local CPU only, as if we were handling an interrupt */
- local_irq_disable_nort();
+ local_irq_disable();
if (hwif->polling) {
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index f014dd1..376f2dc 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
if ((stat & ATA_BUSY) == 0)
break;
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
*rstat = stat;
return -EBUSY;
}
}
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
}
/*
* Allow status to settle, then read it again.
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 0964500..2a744a9 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
int bswap = 1;
/* local CPU only; some systems need this */
- local_irq_save_nort(flags);
+ local_irq_save(flags);
/* read 512 bytes of id info */
hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
drive->dev_flags |= IDE_DFLAG_ID_READ;
#ifdef DEBUG
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 2cecea7..dabb88b 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
page_is_high = PageHighMem(page);
if (page_is_high)
- local_irq_save_nort(flags);
+ local_irq_save(flags);
buf = kmap_atomic(page) + offset;
@@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
kunmap_atomic(buf);
if (page_is_high)
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
len -= nr_bytes;
}
@@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
}
if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
- local_irq_disable_nort();
+ local_irq_disable();
ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d47bb0f..5323581 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1,7 +1,7 @@
/*
* intel_idle.c - native hardware idle loop for modern Intel processors
*
- * Copyright (c) 2010, Intel Corporation.
+ * Copyright (c) 2013, Intel Corporation.
* Len Brown <len.brown@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -329,6 +329,22 @@ static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
{
.enter = NULL }
};
+static struct cpuidle_state avn_cstates[] __initdata = {
+ {
+ .name = "C1-AVN",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle },
+ {
+ .name = "C6-AVN",
+ .desc = "MWAIT 0x51",
+ .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 15,
+ .target_residency = 45,
+ .enter = &intel_idle },
+};
/**
* intel_idle
@@ -465,6 +481,11 @@ static const struct idle_cpu idle_cpu_hsw = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_avn = {
+ .state_table = avn_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
@@ -486,6 +507,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x3f, idle_cpu_hsw),
ICPU(0x45, idle_cpu_hsw),
ICPU(0x46, idle_cpu_hsw),
+ ICPU(0x4D, idle_cpu_avn),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 3800ef5..cecb98a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -783,7 +783,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_mcast_stop_thread(dev, 0);
- local_irq_save_nort(flags);
+ local_irq_save(flags);
netif_addr_lock(dev);
spin_lock(&priv->lock);
@@ -865,7 +865,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
spin_unlock(&priv->lock);
netif_addr_unlock(dev);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
/* We have to cancel outside of the spinlock */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index ba93ef8..09c7129 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1652,7 +1652,6 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
static void
isert_cq_rx_comp_err(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct iscsi_conn *conn = isert_conn->conn;
if (isert_conn->post_recv_buf_count)
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index b4fe94b..922a7fe 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -87,12 +87,12 @@ static int gameport_measure_speed(struct gameport *gameport)
tx = 1 << 30;
for(i = 0; i < 50; i++) {
- local_irq_save_nort(flags);
+ local_irq_save(flags);
GET_TIME(t1);
for (t = 0; t < 50; t++) gameport_read(gameport);
GET_TIME(t2);
GET_TIME(t3);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
udelay(i * 10);
if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
}
@@ -111,11 +111,11 @@ static int gameport_measure_speed(struct gameport *gameport)
tx = 1 << 30;
for(i = 0; i < 50; i++) {
- local_irq_save_nort(flags);
+ local_irq_save(flags);
rdtscl(t1);
for (t = 0; t < 50; t++) gameport_read(gameport);
rdtscl(t2);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
udelay(i * 10);
if (t2 - t1 < tx) tx = t2 - t1;
}
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 888a81a..0aaea7a 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -410,7 +410,6 @@ static int cypress_set_input_params(struct input_dev *input,
__clear_bit(REL_X, input->relbit);
__clear_bit(REL_Y, input->relbit);
- __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 597e9b8..ef1cf52 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -486,6 +486,7 @@ static void elantech_input_sync_v4(struct psmouse *psmouse)
unsigned char *packet = psmouse->packet;
input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
input_mt_report_pointer_emulation(dev, true);
input_sync(dev);
}
@@ -984,6 +985,44 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
}
/*
+ * Advertise INPUT_PROP_BUTTONPAD for clickpads. The testing of bit 12 in
+ * fw_version for this is based on the following fw_version & caps table:
+ *
+ * Laptop-model: fw_version: caps: buttons:
+ * Acer S3 0x461f00 10, 13, 0e clickpad
+ * Acer S7-392 0x581f01 50, 17, 0d clickpad
+ * Acer V5-131 0x461f02 01, 16, 0c clickpad
+ * Acer V5-551 0x461f00 ? clickpad
+ * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
+ * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
+ * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
+ * Asus UX31 0x361f00 20, 15, 0e clickpad
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
+ * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
+ * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons
+ * Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad
+ * Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad
+ * Samsung NP900X3E-A02 0x575f03 ? clickpad
+ * Samsung NP-QX410 0x851b00 19, 14, 0c clickpad
+ * Samsung RC512 0x450f00 08, 15, 0c 2 hw buttons
+ * Samsung RF710 0x450f00 ? 2 hw buttons
+ * System76 Pangolin 0x250f01 ? 2 hw buttons
+ * (*) + 3 trackpoint buttons
+ */
+static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct elantech_data *etd = psmouse->private;
+
+ if (etd->fw_version & 0x001000) {
+ __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+ __clear_bit(BTN_RIGHT, dev->keybit);
+ }
+}
+
+/*
* Set the appropriate event bits for the input subsystem
*/
static int elantech_set_input_params(struct psmouse *psmouse)
@@ -1026,6 +1065,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
__set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
/* fall through */
case 3:
+ if (etd->hw_version == 3)
+ elantech_set_buttonpad_prop(psmouse);
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
if (etd->reports_pressure) {
@@ -1047,9 +1088,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
*/
psmouse_warn(psmouse, "couldn't query resolution data.\n");
}
- /* v4 is clickpad, with only one button. */
- __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
- __clear_bit(BTN_RIGHT, dev->keybit);
+ elantech_set_buttonpad_prop(psmouse);
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
/* For X to recognize me as touchpad. */
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
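
The table in the new comment reduces to a single predicate: bit 12 of fw_version distinguishes clickpads from pads with separate hardware buttons. A runnable check against a few rows of that table:

#include <stdio.h>

int main(void)
{
    /* fw_version samples taken from the comment table above. */
    struct { unsigned fw; const char *model; } pads[] = {
        { 0x461f00, "Acer S3" },        /* clickpad     */
        { 0x450f01, "Asus K53SV" },     /* 2 hw buttons */
        { 0x361f00, "Asus UX31" },      /* clickpad     */
        { 0x150b00, "Samsung NF210" },  /* 2 hw buttons */
    };

    for (int i = 0; i < 4; i++)
        printf("%-14s fw 0x%06x -> %s\n", pads[i].model, pads[i].fw,
               (pads[i].fw & 0x001000) ? "clickpad" : "hw buttons");
    return 0;
}
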
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 26386f9..d8d49d1 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -265,11 +265,22 @@ static int synaptics_identify(struct psmouse *psmouse)
* Read touchpad resolution and maximum reported coordinates
* Resolution is left zero if touchpad does not support the query
*/
+
+static const int *quirk_min_max;
+
static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
unsigned char resp[3];
+ if (quirk_min_max) {
+ priv->x_min = quirk_min_max[0];
+ priv->x_max = quirk_min_max[1];
+ priv->y_min = quirk_min_max[2];
+ priv->y_max = quirk_min_max[3];
+ return 0;
+ }
+
if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
@@ -1485,10 +1496,54 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
{ }
};
+static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+#if defined(CONFIG_DMI)
+ {
+ /* Lenovo ThinkPad Helix */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+ },
+ .driver_data = (int []){1024, 5052, 2258, 4832},
+ },
+ {
+ /* Lenovo ThinkPad X240 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
+ },
+ .driver_data = (int []){1232, 5710, 1156, 4696},
+ },
+ {
+ /* Lenovo ThinkPad T440s */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
+ /* Lenovo ThinkPad T540p */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
+ },
+ .driver_data = (int []){1024, 5056, 2058, 4832},
+ },
+#endif
+ { }
+};
+
void __init synaptics_module_init(void)
{
+ const struct dmi_system_id *min_max_dmi;
+
impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
+
+ min_max_dmi = dmi_first_match(min_max_dmi_table);
+ if (min_max_dmi)
+ quirk_min_max = min_max_dmi->driver_data;
}
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 4c842c3..b604564 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -67,7 +67,6 @@ struct mousedev {
struct device dev;
struct cdev cdev;
bool exist;
- bool is_mixdev;
struct list_head mixdev_node;
bool opened_by_mixdev;
@@ -77,6 +76,9 @@ struct mousedev {
int old_x[4], old_y[4];
int frac_dx, frac_dy;
unsigned long touch;
+
+ int (*open_device)(struct mousedev *mousedev);
+ void (*close_device)(struct mousedev *mousedev);
};
enum mousedev_emul {
@@ -116,9 +118,6 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
static struct mousedev *mousedev_mix;
static LIST_HEAD(mousedev_mix_list);
-static void mixdev_open_devices(void);
-static void mixdev_close_devices(void);
-
#define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
#define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
@@ -428,9 +427,7 @@ static int mousedev_open_device(struct mousedev *mousedev)
if (retval)
return retval;
- if (mousedev->is_mixdev)
- mixdev_open_devices();
- else if (!mousedev->exist)
+ if (!mousedev->exist)
retval = -ENODEV;
else if (!mousedev->open++) {
retval = input_open_device(&mousedev->handle);
@@ -446,9 +443,7 @@ static void mousedev_close_device(struct mousedev *mousedev)
{
mutex_lock(&mousedev->mutex);
- if (mousedev->is_mixdev)
- mixdev_close_devices();
- else if (mousedev->exist && !--mousedev->open)
+ if (mousedev->exist && !--mousedev->open)
input_close_device(&mousedev->handle);
mutex_unlock(&mousedev->mutex);
@@ -459,21 +454,29 @@ static void mousedev_close_device(struct mousedev *mousedev)
* stream. Note that this function is called with mousedev_mix->mutex
* held.
*/
-static void mixdev_open_devices(void)
+static int mixdev_open_devices(struct mousedev *mixdev)
{
- struct mousedev *mousedev;
+ int error;
+
+ error = mutex_lock_interruptible(&mixdev->mutex);
+ if (error)
+ return error;
- if (mousedev_mix->open++)
- return;
+ if (!mixdev->open++) {
+ struct mousedev *mousedev;
- list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
- if (!mousedev->opened_by_mixdev) {
- if (mousedev_open_device(mousedev))
- continue;
+ list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+ if (!mousedev->opened_by_mixdev) {
+ if (mousedev_open_device(mousedev))
+ continue;
- mousedev->opened_by_mixdev = true;
+ mousedev->opened_by_mixdev = true;
+ }
}
}
+
+ mutex_unlock(&mixdev->mutex);
+ return 0;
}
/*
@@ -481,19 +484,22 @@ static void mixdev_open_devices(void)
* device. Note that this function is called with mousedev_mix->mutex
* held.
*/
-static void mixdev_close_devices(void)
+static void mixdev_close_devices(struct mousedev *mixdev)
{
- struct mousedev *mousedev;
+ mutex_lock(&mixdev->mutex);
- if (--mousedev_mix->open)
- return;
+ if (!--mixdev->open) {
+ struct mousedev *mousedev;
- list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
- if (mousedev->opened_by_mixdev) {
- mousedev->opened_by_mixdev = false;
- mousedev_close_device(mousedev);
+ list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+ if (mousedev->opened_by_mixdev) {
+ mousedev->opened_by_mixdev = false;
+ mousedev_close_device(mousedev);
+ }
}
}
+
+ mutex_unlock(&mixdev->mutex);
}
@@ -522,7 +528,7 @@ static int mousedev_release(struct inode *inode, struct file *file)
mousedev_detach_client(mousedev, client);
kfree(client);
- mousedev_close_device(mousedev);
+ mousedev->close_device(mousedev);
return 0;
}
@@ -550,7 +556,7 @@ static int mousedev_open(struct inode *inode, struct file *file)
client->mousedev = mousedev;
mousedev_attach_client(mousedev, client);
- error = mousedev_open_device(mousedev);
+ error = mousedev->open_device(mousedev);
if (error)
goto err_free_client;
@@ -861,16 +867,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
if (mixdev) {
dev_set_name(&mousedev->dev, "mice");
+
+ mousedev->open_device = mixdev_open_devices;
+ mousedev->close_device = mixdev_close_devices;
} else {
int dev_no = minor;
/* Normalize device number if it falls into legacy range */
if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
dev_no -= MOUSEDEV_MINOR_BASE;
dev_set_name(&mousedev->dev, "mouse%d", dev_no);
+
+ mousedev->open_device = mousedev_open_device;
+ mousedev->close_device = mousedev_close_device;
}
mousedev->exist = true;
- mousedev->is_mixdev = mixdev;
mousedev->handle.dev = input_get_device(dev);
mousedev->handle.name = dev_name(&mousedev->dev);
mousedev->handle.handler = handler;
@@ -919,7 +930,7 @@ static void mousedev_destroy(struct mousedev *mousedev)
device_del(&mousedev->dev);
mousedev_cleanup(mousedev);
input_free_minor(MINOR(mousedev->dev.devt));
- if (!mousedev->is_mixdev)
+ if (mousedev != mousedev_mix)
input_unregister_handle(&mousedev->handle);
put_device(&mousedev->dev);
}
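The mousedev change above is a classic flag-to-vtable refactor: instead of every caller branching on is_mixdev, each device is bound to the right open/close pair once, at creation time. A small standalone sketch of that pattern, with hypothetical minimal types rather than the real mousedev structures:

/*
 * Illustrative sketch of the refactor above: a boolean type flag plus
 * if/else at every call site becomes a pair of function pointers bound
 * once at creation time.
 */
#include <stdio.h>

struct dev {
	const char *name;
	int (*open_device)(struct dev *d);
	void (*close_device)(struct dev *d);
};

static int plain_open(struct dev *d)
{
	printf("open %s\n", d->name);
	return 0;
}

static void plain_close(struct dev *d)
{
	printf("close %s\n", d->name);
}

static int mix_open(struct dev *d)
{
	printf("open every device feeding %s\n", d->name);
	return 0;
}

static void mix_close(struct dev *d)
{
	printf("close every device feeding %s\n", d->name);
}

static void dev_init(struct dev *d, const char *name, int is_mix)
{
	d->name = name;
	d->open_device = is_mix ? mix_open : plain_open;
	d->close_device = is_mix ? mix_close : plain_close;
}

int main(void)
{
	struct dev mouse0, mice;

	dev_init(&mouse0, "mouse0", 0);
	dev_init(&mice, "mice", 1);
	mouse0.open_device(&mouse0);	/* callers no longer branch on a flag */
	mice.open_device(&mice);
	mice.close_device(&mice);
	mouse0.close_device(&mouse0);
	return 0;
}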
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index e53416a..44a1fb6 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -304,7 +304,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
struct usb_device *dev = interface_to_usbdev(intf);
char limit = 0;
/* result has to be defined as int for some devices */
- int result = 0;
+ int result = 0, touch_max = 0;
int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
unsigned char *report;
@@ -351,7 +351,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
if (usage == WCM_DESKTOP) {
if (finger) {
features->device_type = BTN_TOOL_FINGER;
-
+ /* touch device at least supports one touch point */
+ touch_max = 1;
switch (features->type) {
case TABLETPC2FG:
features->pktlen = WACOM_PKGLEN_TPC2FG;
@@ -504,6 +505,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
}
out:
+ if (!features->touch_max && touch_max)
+ features->touch_max = touch_max;
result = 0;
kfree(report);
return result;
@@ -719,7 +722,7 @@ static int wacom_led_control(struct wacom *wacom)
return -ENOMEM;
if (wacom->wacom_wac.features.type >= INTUOS5S &&
- wacom->wacom_wac.features.type <= INTUOS5L) {
+ wacom->wacom_wac.features.type <= INTUOSPL) {
/*
* Touch Ring and crop mark LED luminance may take on
* one of four values:
@@ -981,6 +984,9 @@ static int wacom_initialize_leds(struct wacom *wacom)
case INTUOS5S:
case INTUOS5:
case INTUOS5L:
+ case INTUOSPS:
+ case INTUOSPM:
+ case INTUOSPL:
wacom->led.select[0] = 0;
wacom->led.select[1] = 0;
wacom->led.llv = 32;
@@ -1024,6 +1030,9 @@ static void wacom_destroy_leds(struct wacom *wacom)
case INTUOS5S:
case INTUOS5:
case INTUOS5L:
+ case INTUOSPS:
+ case INTUOSPM:
+ case INTUOSPL:
sysfs_remove_group(&wacom->intf->dev.kobj,
&intuos5_led_attr_group);
break;
@@ -1302,7 +1311,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
* HID descriptor. If this is the touch interface (wMaxPacketSize
* of WACOM_PKGLEN_BBTOUCH3), override the table values.
*/
- if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+ if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) {
features->device_type = BTN_TOOL_FINGER;
features->pktlen = WACOM_PKGLEN_BBTOUCH3;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index c59b797..0091bde 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -621,14 +621,14 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
} else {
input_report_abs(input, ABS_MISC, 0);
}
- } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+ } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
int i;
/* Touch ring mode switch has no capacitive sensor */
input_report_key(input, BTN_0, (data[3] & 0x01));
/*
- * ExpressKeys on Intuos5 have a capacitive sensor in
+ * ExpressKeys on Intuos5/Intuos Pro have a capacitive sensor in
* addition to the mechanical switch. Switch data is
* stored in data[4], capacitive data in data[5].
*/
@@ -716,7 +716,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
features->type == INTUOS4 ||
features->type == INTUOS4S ||
features->type == INTUOS5 ||
- features->type == INTUOS5S)) {
+ features->type == INTUOS5S ||
+ features->type == INTUOSPM ||
+ features->type == INTUOSPS)) {
return 0;
}
@@ -769,8 +771,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
} else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
/* I4 mouse */
- if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
- (features->type >= INTUOS5S && features->type <= INTUOS5L)) {
+ if (features->type >= INTUOS4S && features->type <= INTUOSPL) {
input_report_key(input, BTN_LEFT, data[6] & 0x01);
input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
input_report_key(input, BTN_RIGHT, data[6] & 0x04);
@@ -797,7 +798,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
}
}
} else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
- features->type == INTUOS4L || features->type == INTUOS5L) &&
+ features->type == INTUOS4L || features->type == INTUOS5L ||
+ features->type == INTUOSPL) &&
wacom->tool[idx] == BTN_TOOL_LENS) {
/* Lens cursor packets */
input_report_key(input, BTN_LEFT, data[8] & 0x01);
@@ -1107,6 +1109,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
{
+ struct wacom_features *features = &wacom->features;
struct input_dev *input = wacom->input;
bool touch = data[1] & 0x80;
int slot = input_mt_get_slot_by_key(input, data[0]);
@@ -1122,14 +1125,23 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
if (touch) {
int x = (data[2] << 4) | (data[4] >> 4);
int y = (data[3] << 4) | (data[4] & 0x0f);
- int a = data[5];
+ int width, height;
- // "a" is a scaled-down area which we assume is roughly
- // circular and which can be described as: a=(pi*r^2)/C.
- int x_res = input_abs_get_res(input, ABS_X);
- int y_res = input_abs_get_res(input, ABS_Y);
- int width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
- int height = width * y_res / x_res;
+ if (features->type >= INTUOSPS && features->type <= INTUOSPL) {
+ width = data[5];
+ height = data[6];
+ } else {
+ /*
+ * "a" is a scaled-down area which we assume is
+ * roughly circular and which can be described as:
+ * a=(pi*r^2)/C.
+ */
+ int a = data[5];
+ int x_res = input_abs_get_res(input, ABS_X);
+ int y_res = input_abs_get_res(input, ABS_Y);
+ width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+ height = width * y_res / x_res;
+ }
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
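For the legacy branch kept above, the geometry is: if the reported area is a = (pi*r^2)/C, then r = sqrt(a*C/pi) and width = 2*r, so WACOM_CONTACT_AREA_SCALE plays the role of C/pi. A quick standalone check of that arithmetic; the scale and resolution values below are illustrative, not the driver's constants:

/*
 * Standalone check of the legacy branch's arithmetic; SCALE and the
 * resolutions are illustrative values, not the driver's constants.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	const int SCALE = 2607;		/* stand-in for WACOM_CONTACT_AREA_SCALE */
	int a = 20;			/* raw scaled-down area, cf. data[5] */
	int x_res = 100, y_res = 200;	/* axis resolutions, units per mm */
	int width = 2 * (int)sqrt((double)(a * SCALE));
	int height = width * y_res / x_res;	/* correct for anisotropic res */

	printf("width=%d height=%d\n", width, height);	/* width=456 height=912 */
	return 0;
}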
@@ -1337,6 +1349,9 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case INTUOS5S:
case INTUOS5:
case INTUOS5L:
+ case INTUOSPS:
+ case INTUOSPM:
+ case INTUOSPL:
if (len == WACOM_PKGLEN_BBTOUCH3)
sync = wacom_bpt3_touch(wacom_wac);
else
@@ -1420,7 +1435,7 @@ void wacom_setup_device_quirks(struct wacom_features *features)
/* these device have multiple inputs */
if (features->type >= WIRELESS ||
- (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
+ (features->type >= INTUOS5S && features->type <= INTUOSPL) ||
(features->oVid && features->oPid))
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
@@ -1627,6 +1642,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case INTUOS5:
case INTUOS5L:
+ case INTUOSPM:
+ case INTUOSPL:
if (features->device_type == BTN_TOOL_PEN) {
__set_bit(BTN_7, input_dev->keybit);
__set_bit(BTN_8, input_dev->keybit);
@@ -1634,6 +1651,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
case INTUOS5S:
+ case INTUOSPS:
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
if (features->device_type == BTN_TOOL_PEN) {
@@ -1952,6 +1970,18 @@ static const struct wacom_features wacom_features_0x29 =
static const struct wacom_features wacom_features_0x2A =
{ "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x314 =
+ { "Wacom Intuos Pro S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
+ 63, INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x315 =
+ { "Wacom Intuos Pro M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
+ 63, INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x317 =
+ { "Wacom Intuos Pro L", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047,
+ 63, INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
static const struct wacom_features wacom_features_0xF4 =
{ "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -2259,6 +2289,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x300) },
{ USB_DEVICE_WACOM(0x301) },
{ USB_DEVICE_WACOM(0x304) },
+ { USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_DETAILED(0x317, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index dfc9e08..d6dec58 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -76,6 +76,9 @@ enum {
INTUOS5S,
INTUOS5,
INTUOS5L,
+ INTUOSPS,
+ INTUOSPM,
+ INTUOSPL,
WACOM_21UX2,
WACOM_22HD,
DTK,
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 72531f0..5d2edb4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -982,10 +982,10 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
address &= ~(0xfffULL);
cmd->data[0] = devid;
- cmd->data[0] |= (pasid & 0xff) << 16;
+ cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
cmd->data[0] |= (qdep & 0xff) << 24;
cmd->data[1] = devid;
- cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
+ cmd->data[1] |= (pasid & 0xff) << 16;
cmd->data[2] = lower_32_bits(address);
cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
cmd->data[3] = upper_32_bits(address);
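The amd_iommu hunk swaps which PASID byte lands in which command word: after the fix, PASID[15:8] sits in bits 23:16 of data[0] and PASID[7:0] in bits 23:16 of data[1]. A standalone sketch of that packing, using only the field positions visible in the hunk itself:

/*
 * Sketch of the corrected packing, from the hunk above: data[0] bits
 * 23:16 take PASID[15:8] (with the queue depth in bits 31:24), and
 * data[1] bits 23:16 take PASID[7:0].
 */
#include <stdint.h>
#include <stdio.h>

static void pack(uint32_t data[2], uint16_t devid, uint16_t pasid, uint8_t qdep)
{
	data[0] = devid;
	data[0] |= ((pasid >> 8) & 0xff) << 16;		/* high PASID byte */
	data[0] |= ((uint32_t)qdep & 0xff) << 24;
	data[1] = devid;
	data[1] |= (pasid & 0xff) << 16;		/* low PASID byte */
}

int main(void)
{
	uint32_t d[2] = { 0, 0 };

	pack(d, 0x1234, 0xabcd, 3);
	printf("data0=%08x data1=%08x\n", d[0], d[1]);
	/* prints: data0=03ab1234 data1=00cd1234 */
	return 0;
}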
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 02125e6..5a4da94 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[] =
static void
isdnloop_fake_err(isdnloop_card *card)
{
- char buf[60];
+ char buf[64];
- sprintf(buf, "E%s", card->omsg);
+ snprintf(buf, sizeof(buf), "E%s", card->omsg);
isdnloop_fake(card, buf, -1);
isdnloop_fake(card, "NAK", -1);
}
@@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card *card)
case 7:
/* 0x;EAZ */
p += 3;
+ if (strlen(p) >= sizeof(card->eazlist[0]))
+ break;
strcpy(card->eazlist[ch - 1], p);
break;
case 8:
@@ -1070,6 +1072,12 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
return -EBUSY;
if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
return -EFAULT;
+
+ for (i = 0; i < 3; i++) {
+ if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&card->isdnloop_lock, flags);
switch (sdef.ptype) {
case ISDN_PTYPE_EURO:
@@ -1127,7 +1135,7 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
{
ulong a;
int i;
- char cbuf[60];
+ char cbuf[80];
isdn_ctrl cmd;
isdnloop_cdef cdef;
@@ -1192,7 +1200,6 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
break;
if ((c->arg & 255) < ISDNLOOP_BCH) {
char *p;
- char dial[50];
char dcode[4];
a = c->arg;
@@ -1204,10 +1211,10 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
} else
/* Normal Dial */
strcpy(dcode, "CAL");
- strcpy(dial, p);
- sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
- dcode, dial, c->parm.setup.si1,
- c->parm.setup.si2, c->parm.setup.eazmsn);
+ snprintf(cbuf, sizeof(cbuf),
+ "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
+ dcode, p, c->parm.setup.si1,
+ c->parm.setup.si2, c->parm.setup.eazmsn);
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;
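The isdnloop changes are all one hardening pattern: bound every formatted or copied write by the destination size instead of trusting peer-supplied lengths (snprintf over sprintf, a length guard before strcpy, memchr to verify NUL termination of user input). A minimal user-space sketch, with hypothetical buffer sizes:

/*
 * Minimal user-space sketch of the hardening pattern applied here:
 * bound formatted writes by the destination size and reject unbounded
 * copies up front. Buffer sizes are hypothetical.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[64];
	char eaz[16];
	const char *omsg = "peer-controlled-string-of-unknown-length";

	/* sprintf(buf, "E%s", omsg) overflows once strlen(omsg) >= 63;
	 * snprintf() truncates instead and always NUL-terminates. */
	snprintf(buf, sizeof(buf), "E%s", omsg);

	/* When truncation is not acceptable, reject oversized input,
	 * mirroring the strlen() guard added before strcpy() above. */
	if (strlen(omsg) >= sizeof(eaz))
		puts("too long, rejected");
	else
		strcpy(eaz, omsg);
	return 0;
}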
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 3d7245d..49794b4 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -61,7 +61,7 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_CPU
bool "LED CPU Trigger"
- depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE
+ depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 5eb76dd..f950c9d 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -1,7 +1,6 @@
config BCACHE
tristate "Block device as cache"
- depends on !PREEMPT_RT_FULL
---help---
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c147f06..a562d5a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1813,14 +1813,14 @@ static void dm_request_fn(struct request_queue *q)
if (map_request(ti, clone, md))
goto requeued;
- BUG_ON_NONRT(!irqs_disabled());
+ BUG_ON(!irqs_disabled());
spin_lock(q->queue_lock);
}
goto out;
requeued:
- BUG_ON_NONRT(!irqs_disabled());
+ BUG_ON(!irqs_disabled());
spin_lock(q->queue_lock);
delay_and_out:
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ab00c1e..3ecfb06 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1541,9 +1541,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
struct raid5_percpu *percpu;
unsigned long cpu;
- cpu = get_cpu_light();
+ cpu = get_cpu();
percpu = per_cpu_ptr(conf->percpu, cpu);
- spin_lock(&percpu->lock);
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
@@ -1595,8 +1594,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
- spin_unlock(&percpu->lock);
- put_cpu_light();
+ put_cpu();
}
static int grow_one_stripe(struct r5conf *conf)
@@ -5458,7 +5456,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
__func__, cpu);
break;
}
- spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
}
put_online_cpus();
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 0a5e1e1..2113ffa 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -444,7 +444,6 @@ struct r5conf {
int recovery_disabled;
/* per cpu variables */
struct raid5_percpu {
- spinlock_t lock; /* Protection for -RT */
struct page *spare_page; /* Used when checking P/Q in raid6 */
void *scribble; /* space for constructing buffer
* lists and performing address
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 6386ced..91c694b 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -327,13 +327,16 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
struct i2c_client *c;
u8 eedata[256];
+ memset(tv, 0, sizeof(*tv));
+
c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return;
strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name));
c->adapter = &cx->i2c_adap[0];
c->addr = 0xa0 >> 1;
- memset(tv, 0, sizeof(*tv));
if (tveeprom_read(c, eedata, sizeof(eedata)))
goto ret;
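Two independent fixes meet in the cx18 hunk: the caller's tveeprom is zeroed before any early return can leave it uninitialized, and the kzalloc() result is NULL-checked before strlcpy() touches it. A hypothetical minimal reproduction of both, with calloc() standing in for kzalloc():

/*
 * Hypothetical minimal reproduction of both fixes: the output is
 * zeroed before any early return, and the allocation is checked
 * before it is dereferenced.
 */
#include <stdlib.h>
#include <string.h>

struct tveeprom {
	int model;
};

static void read_eeprom(struct tveeprom *tv)
{
	char *c;

	memset(tv, 0, sizeof(*tv));	/* defined output on every path */

	c = calloc(1, 64);		/* cf. kzalloc(..., GFP_KERNEL) */
	if (!c)
		return;			/* previously: strlcpy() into NULL */

	strcpy(c, "cx18 tveeprom tmp");
	/* ... use the temporary i2c client, fill in *tv ... */
	free(c);
}

int main(void)
{
	struct tveeprom tv;

	read_eeprom(&tv);
	return 0;
}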
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 20e345d..a1c641e 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -149,6 +149,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret;
int i;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
@@ -173,7 +174,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (1 + msg[i].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[i].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = 0;
obuf[1] = msg[i].len;
@@ -193,12 +195,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (3 + msg[i].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[i].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
if (1 + msg[i + 1].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[i + 1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[i].len;
obuf[1] = msg[i+1].len;
@@ -223,7 +227,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[i].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[i].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[i].addr;
obuf[1] = msg[i].len;
@@ -237,8 +242,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
}
+ if (i == num)
+ ret = num;
+ else
+ ret = -EREMOTEIO;
+
+unlock:
mutex_unlock(&d->i2c_mutex);
- return i == num ? num : -EREMOTEIO;
+ return ret;
}
static u32 cxusb_i2c_func(struct i2c_adapter *adapter)
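Every i2c hunk in cxusb.c and dw2102.c applies the same fix: an early return while d->i2c_mutex is held leaks the lock, so each error path is rerouted through a single unlock label. A standalone sketch of the pattern, with a pthread mutex standing in for the kernel mutex and made-up sizes:

/*
 * Standalone sketch of the pattern; the point is the single unlock
 * label that every error path now funnels through.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t i2c_mutex = PTHREAD_MUTEX_INITIALIZER;

static int xfer(int len, int bufsize)
{
	int ret;

	pthread_mutex_lock(&i2c_mutex);

	if (len > bufsize) {
		ret = -1;	/* cf. -EOPNOTSUPP */
		goto unlock;	/* was: return -- leaving the mutex held */
	}

	/* ... perform the transfer ... */
	ret = 0;
unlock:
	pthread_mutex_unlock(&i2c_mutex);
	return ret;
}

int main(void)
{
	printf("%d\n", xfer(300, 64));	/* error path still unlocks */
	printf("%d\n", xfer(16, 64));
	return 0;
}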
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 71b22f5..4170a45 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -301,6 +301,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret;
if (!d)
return -ENODEV;
@@ -316,7 +317,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
if (2 + msg[1].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
@@ -340,7 +342,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
if (2 + msg[0].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[0].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
@@ -357,7 +360,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
if (2 + msg[0].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[0].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
@@ -386,15 +390,17 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
break;
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int len, i, j;
+ int len, i, j, ret;
if (!d)
return -ENODEV;
@@ -430,7 +436,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
if (2 + msg[j].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
dw210x_op_rw(d->udev, 0xc3,
@@ -466,7 +473,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
if (2 + msg[j].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[j].addr << 1;
@@ -481,15 +489,18 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
}
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret;
int i;
if (!d)
@@ -506,7 +517,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[1].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[1].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
@@ -530,7 +542,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[0].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[0].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
@@ -556,9 +569,11 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
msg[i].flags == 0 ? ">>>" : "<<<");
debug_dump(msg[i].buf, msg[i].len, deb_xfer);
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
@@ -566,7 +581,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct usb_device *udev;
- int len, i, j;
+ int len, i, j, ret;
if (!d)
return -ENODEV;
@@ -618,7 +633,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (msg[j].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
dw210x_op_rw(d->udev, 0x91, 0, 0,
@@ -652,7 +668,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[j].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[j + 1].len;
@@ -671,7 +688,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (2 + msg[j].len > sizeof(obuf)) {
warn("i2c wr: len=%d is too big!\n",
msg[j].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
obuf[0] = msg[j].len + 1;
obuf[1] = (msg[j].addr << 1);
@@ -685,9 +703,11 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
}
}
+ ret = num;
+unlock:
mutex_unlock(&d->i2c_mutex);
- return num;
+ return ret;
}
static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e9b20e8..8dacd4c 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -63,7 +63,6 @@ config ATMEL_PWM
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
depends on (AVR32 || ARCH_AT91)
- default y if PREEMPT_RT_FULL
help
Select this if you want a library to allocate the Timer/Counter
blocks found on many Atmel processors. This facilitates using
@@ -79,7 +78,8 @@ config ATMEL_TCB_CLKSRC
are combined to make a single 32-bit timer.
When GENERIC_CLOCKEVENTS is defined, the third timer channel
- may be used as a clock event device supporting oneshot mode.
+ may be used as a clock event device supporting oneshot mode
+ (delays of up to two seconds) based on the 32 KiHz clock.
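The two-second figure in the help text is plain counter arithmetic: assuming the usual 16-bit TC channel, the 32 KiHz clock wraps the counter after 2^16 / 32768 Hz = 2 s, which bounds the longest programmable oneshot delay.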
config ATMEL_TCB_CLKSRC_BLOCK
int
@@ -93,15 +93,6 @@ config ATMEL_TCB_CLKSRC_BLOCK
TC can be used for other purposes, such as PWM generation and
interval timing.
-config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
- bool "TC Block use 32 KiHz clock"
- depends on ATMEL_TCB_CLKSRC
- default y if !PREEMPT_RT_FULL
- help
- Select this to use 32 KiHz base clock rate as TC block clock
- source for clock events.
-
-
config DUMMY_IRQ
tristate "Dummy IRQ handler"
default n
@@ -131,35 +122,6 @@ config IBM_ASM
for information on the specific driver level and support statement
for your IBM server.
-config HWLAT_DETECTOR
- tristate "Testing module to detect hardware-induced latencies"
- depends on DEBUG_FS
- depends on RING_BUFFER
- default m
- ---help---
- A simple hardware latency detector. Use this module to detect
- large latencies introduced by the behavior of the underlying
- system firmware external to Linux. We do this using periodic
- use of stop_machine to grab all available CPUs and measure
- for unexplainable gaps in the CPU timestamp counter(s). By
- default, the module is not enabled until the "enable" file
- within the "hwlat_detector" debugfs directory is toggled.
-
- This module is often used to detect SMI (System Management
- Interrupts) on x86 systems, though is not x86 specific. To
- this end, we default to using a sample window of 1 second,
- during which we will sample for 0.5 seconds. If an SMI or
- similar event occurs during that time, it is recorded
- into an 8K-sample global ring buffer until retrieved.
-
- WARNING: This software should never be enabled (it can be built
- but should not be turned on after it is loaded) in a production
- environment where high latencies are a concern since the
- sampling mechanism actually introduces latencies for
- regular tasks while the CPU(s) are being held.
-
- If unsure, say N
-
config PHANTOM
tristate "Sensable PHANToM (PCI)"
depends on PCI
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 6bb08da40..c235d5b 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,4 +53,3 @@ obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
-obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o
diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c
deleted file mode 100644
index 2429c43..0000000
--- a/drivers/misc/hwlat_detector.c
+++ /dev/null
@@ -1,1240 +0,0 @@
-/*
- * hwlat_detector.c - A simple Hardware Latency detector.
- *
- * Use this module to detect large system latencies induced by the behavior of
- * certain underlying system hardware or firmware, independent of Linux itself.
- * The code was developed originally to detect the presence of SMIs on Intel
- * and AMD systems, although there is no dependency upon x86 herein.
- *
- * The classical example usage of this module is in detecting the presence of
- * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
- * somewhat special form of hardware interrupt spawned from earlier CPU debug
- * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
- * LPC (or other device) to generate a special interrupt under certain
- * circumstances, for example, upon expiration of a special SMI timer device,
- * due to certain external thermal readings, on certain I/O address accesses,
- * and other situations. An SMI hits a special CPU pin, triggers a special
- * SMI mode (complete with special memory map), and the OS is unaware.
- *
- * Although certain hardware-inducing latencies are necessary (for example,
- * a modern system often requires an SMI handler for correct thermal control
- * and remote management) they can wreak havoc upon any OS-level performance
- * guarantees toward low-latency, especially when the OS is not even made
- * aware of the presence of these interrupts. For this reason, we need a
- * somewhat brute force mechanism to detect these interrupts. In this case,
- * we do it by hogging all of the CPU(s) for configurable timer intervals,
- * sampling the built-in CPU timer, looking for discontiguous readings.
- *
- * WARNING: This implementation necessarily introduces latencies. Therefore,
- * you should NEVER use this module in a production environment
- * requiring any kind of low-latency performance guarantee(s).
- *
- * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
- *
- * Includes useful feedback from Clark Williams <clark@redhat.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/ring_buffer.h>
-#include <linux/time.h>
-#include <linux/hrtimer.h>
-#include <linux/kthread.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/version.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/trace_clock.h>
-
-#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
-#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
-#define U64STR_SIZE 22 /* 20 digits max */
-
-#define VERSION "1.0.0"
-#define BANNER "hwlat_detector: "
-#define DRVNAME "hwlat_detector"
-#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */
-#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
-#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
-
-/* Module metadata */
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jon Masters <jcm@redhat.com>");
-MODULE_DESCRIPTION("A simple hardware latency detector");
-MODULE_VERSION(VERSION);
-
-/* Module parameters */
-
-static int debug;
-static int enabled;
-static int threshold;
-
-module_param(debug, int, 0); /* enable debug */
-module_param(enabled, int, 0); /* enable detector */
-module_param(threshold, int, 0); /* latency threshold */
-
-/* Buffering and sampling */
-
-static struct ring_buffer *ring_buffer; /* sample buffer */
-static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */
-static unsigned long buf_size = BUF_SIZE_DEFAULT;
-static struct task_struct *kthread; /* sampling thread */
-
-/* DebugFS filesystem entries */
-
-static struct dentry *debug_dir; /* debugfs directory */
-static struct dentry *debug_max; /* maximum TSC delta */
-static struct dentry *debug_count; /* total detect count */
-static struct dentry *debug_sample_width; /* sample width us */
-static struct dentry *debug_sample_window; /* sample window us */
-static struct dentry *debug_sample; /* raw samples us */
-static struct dentry *debug_threshold; /* threshold us */
-static struct dentry *debug_enable; /* enable/disable */
-
-/* Individual samples and global state */
-
-struct sample; /* latency sample */
-struct data; /* Global state */
-
-/* Sampling functions */
-static int __buffer_add_sample(struct sample *sample);
-static struct sample *buffer_get_sample(struct sample *sample);
-
-/* Threading and state */
-static int kthread_fn(void *unused);
-static int start_kthread(void);
-static int stop_kthread(void);
-static void __reset_stats(void);
-static int init_stats(void);
-
-/* Debugfs interface */
-static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos, const u64 *entry);
-static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos, u64 *entry);
-static int debug_sample_fopen(struct inode *inode, struct file *filp);
-static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos);
-static int debug_sample_release(struct inode *inode, struct file *filp);
-static int debug_enable_fopen(struct inode *inode, struct file *filp);
-static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos);
-static ssize_t debug_enable_fwrite(struct file *file,
- const char __user *user_buffer,
- size_t user_size, loff_t *offset);
-
-/* Initialization functions */
-static int init_debugfs(void);
-static void free_debugfs(void);
-static int detector_init(void);
-static void detector_exit(void);
-
-/* Individual latency samples are stored here when detected and packed into
- * the ring_buffer circular buffer, where they are overwritten when
- * more than buf_size/sizeof(sample) samples are received. */
-struct sample {
- u64 seqnum; /* unique sequence */
- u64 duration; /* ktime delta */
- u64 outer_duration; /* ktime delta (outer loop) */
- struct timespec timestamp; /* wall time */
- unsigned long lost;
-};
-
-/* keep the global state somewhere. */
-static struct data {
-
- struct mutex lock; /* protect changes */
-
- u64 count; /* total since reset */
- u64 max_sample; /* max hardware latency */
- u64 threshold; /* sample threshold level */
-
- u64 sample_window; /* total sampling window (on+off) */
- u64 sample_width; /* active sampling portion of window */
-
- atomic_t sample_open; /* whether the sample file is open */
-
- wait_queue_head_t wq; /* waitqueue for new sample values */
-
-} data;
-
-/**
- * __buffer_add_sample - add a new latency sample recording to the ring buffer
- * @sample: The new latency sample value
- *
- * This receives a new latency sample and records it in a global ring buffer.
- * No additional locking is used in this case.
- */
-static int __buffer_add_sample(struct sample *sample)
-{
- return ring_buffer_write(ring_buffer,
- sizeof(struct sample), sample);
-}
-
-/**
- * buffer_get_sample - remove a hardware latency sample from the ring buffer
- * @sample: Pre-allocated storage for the sample
- *
- * This retrieves a hardware latency sample from the global circular buffer
- */
-static struct sample *buffer_get_sample(struct sample *sample)
-{
- struct ring_buffer_event *e = NULL;
- struct sample *s = NULL;
- unsigned int cpu = 0;
-
- if (!sample)
- return NULL;
-
- mutex_lock(&ring_buffer_mutex);
- for_each_online_cpu(cpu) {
- e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost);
- if (e)
- break;
- }
-
- if (e) {
- s = ring_buffer_event_data(e);
- memcpy(sample, s, sizeof(struct sample));
- } else
- sample = NULL;
- mutex_unlock(&ring_buffer_mutex);
-
- return sample;
-}
-
-#ifndef CONFIG_TRACING
-#define time_type ktime_t
-#define time_get() ktime_get()
-#define time_to_us(x) ktime_to_us(x)
-#define time_sub(a, b) ktime_sub(a, b)
-#define init_time(a, b) (a).tv64 = b
-#define time_u64(a) ((a).tv64)
-#else
-#define time_type u64
-#define time_get() trace_clock_local()
-#define time_to_us(x) div_u64(x, 1000)
-#define time_sub(a, b) ((a) - (b))
-#define init_time(a, b) (a = b)
-#define time_u64(a) a
-#endif
-/**
- * get_sample - sample the CPU TSC and look for likely hardware latencies
- *
- * Used to repeatedly capture the CPU TSC (or similar), looking for potential
- * hardware-induced latency. Called with interrupts disabled and with
- * data.lock held.
- */
-static int get_sample(void)
-{
- time_type start, t1, t2, last_t2;
- s64 diff, total = 0;
- u64 sample = 0;
- u64 outer_sample = 0;
- int ret = -1;
-
- init_time(last_t2, 0);
- start = time_get(); /* start timestamp */
-
- do {
-
- t1 = time_get(); /* we'll look for a discontinuity */
- t2 = time_get();
-
- if (time_u64(last_t2)) {
- /* Check the delta from outer loop (t2 to next t1) */
- diff = time_to_us(time_sub(t1, last_t2));
- /* This shouldn't happen */
- if (diff < 0) {
- pr_err(BANNER "time running backwards\n");
- goto out;
- }
- if (diff > outer_sample)
- outer_sample = diff;
- }
- last_t2 = t2;
-
- total = time_to_us(time_sub(t2, start)); /* sample width */
-
- /* This checks the inner loop (t1 to t2) */
- diff = time_to_us(time_sub(t2, t1)); /* current diff */
-
- /* This shouldn't happen */
- if (diff < 0) {
- pr_err(BANNER "time running backwards\n");
- goto out;
- }
-
- if (diff > sample)
- sample = diff; /* only want highest value */
-
- } while (total <= data.sample_width);
-
- ret = 0;
-
- /* If we exceed the threshold value, we have found a hardware latency */
- if (sample > data.threshold || outer_sample > data.threshold) {
- struct sample s;
-
- ret = 1;
-
- data.count++;
- s.seqnum = data.count;
- s.duration = sample;
- s.outer_duration = outer_sample;
- s.timestamp = CURRENT_TIME;
- __buffer_add_sample(&s);
-
- /* Keep a running maximum of the highest hardware latency recorded */
- if (sample > data.max_sample)
- data.max_sample = sample;
- }
-
-out:
- return ret;
-}
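The deleted get_sample() above is the heart of the detector: two back-to-back timestamps should be nearly identical, so any large inner gap (t1 to t2) or outer gap (previous t2 to next t1) is time the CPU spent elsewhere, e.g. in an SMI. A user-space analogue of the inner-gap check, illustrative only:

/*
 * User-space analogue of the inner-gap check. The kernel version
 * above also tracks the outer gap (previous t2 to next t1) and only
 * records samples that exceed a configurable threshold.
 */
#include <stdio.h>
#include <time.h>

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

int main(void)
{
	long long start = now_us(), max_gap = 0;

	while (now_us() - start < 500000) {	/* 0.5 s sample width */
		long long t1 = now_us();
		long long t2 = now_us();

		if (t2 - t1 > max_gap)
			max_gap = t2 - t1;
	}
	printf("largest inner gap: %lld us\n", max_gap);
	return 0;
}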
-
-/*
- * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
- * @unused: A required part of the kthread API.
- *
- * Used to periodically sample the CPU TSC via a call to get_sample. We
- * disable interrupts, which does (intentionally) introduce latency since we
- * need to ensure nothing else might be running (and thus pre-empting).
- * Obviously this should never be used in production environments.
- *
- * Currently this runs on whichever CPU it was scheduled on, but most
- * real-world hardware latency situations occur across several CPUs;
- * we might later generalize this if we find there are any actual
- * systems with alternate SMI delivery or other hardware latencies.
- */
-static int kthread_fn(void *unused)
-{
- int ret;
- u64 interval;
-
- while (!kthread_should_stop()) {
-
- mutex_lock(&data.lock);
-
- local_irq_disable();
- ret = get_sample();
- local_irq_enable();
-
- if (ret > 0)
- wake_up(&data.wq); /* wake up reader(s) */
-
- interval = data.sample_window - data.sample_width;
- do_div(interval, USEC_PER_MSEC); /* modifies interval value */
-
- mutex_unlock(&data.lock);
-
- if (msleep_interruptible(interval))
- break;
- }
-
- return 0;
-}
-
-/**
- * start_kthread - Kick off the hardware latency sampling/detector kthread
- *
- * This starts a kernel thread that will sit and sample the CPU timestamp
- * counter (TSC or similar) and look for potential hardware latencies.
- */
-static int start_kthread(void)
-{
- kthread = kthread_run(kthread_fn, NULL,
- DRVNAME);
- if (IS_ERR(kthread)) {
- pr_err(BANNER "could not start sampling thread\n");
- enabled = 0;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
- *
- * This kicks the running hardware latency sampling/detector kernel thread and
- * tells it to stop sampling now. Use this on unload and at system shutdown.
- */
-static int stop_kthread(void)
-{
- int ret;
-
- ret = kthread_stop(kthread);
-
- return ret;
-}
-
-/**
- * __reset_stats - Reset statistics for the hardware latency detector
- *
- * We use data to store various statistics and global state. We call this
- * function in order to reset those when "enable" is toggled on or off, and
- * also at initialization. Should be called with data.lock held.
- */
-static void __reset_stats(void)
-{
- data.count = 0;
- data.max_sample = 0;
- ring_buffer_reset(ring_buffer); /* flush out old sample entries */
-}
-
-/**
- * init_stats - Setup global state statistics for the hardware latency detector
- *
- * We use data to store various statistics and global state. We also use
- * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
- * induced system latencies. This function initializes these structures and
- * allocates the global ring buffer also.
- */
-static int init_stats(void)
-{
- int ret = -ENOMEM;
-
- mutex_init(&data.lock);
- init_waitqueue_head(&data.wq);
- atomic_set(&data.sample_open, 0);
-
- ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);
-
- if (WARN(!ring_buffer, KERN_ERR BANNER
- "failed to allocate ring buffer!\n"))
- goto out;
-
- __reset_stats();
- data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */
- data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
- data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */
-
- ret = 0;
-
-out:
- return ret;
-
-}
-
-/*
- * simple_data_read - Wrapper read function for global state debugfs entries
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- * @entry: The entry to read from
- *
- * This function provides a generic read implementation for the global state
- * "data" structure debugfs filesystem entries. It would be nice to use
- * simple_attr_read directly, but we need to make sure that the data.lock
- * is held during the actual read.
- */
-static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos, const u64 *entry)
-{
- char buf[U64STR_SIZE];
- u64 val = 0;
- int len = 0;
-
- memset(buf, 0, sizeof(buf));
-
- if (!entry)
- return -EFAULT;
-
- mutex_lock(&data.lock);
- val = *entry;
- mutex_unlock(&data.lock);
-
- len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val);
-
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
-
-}
-
-/*
- * simple_data_write - Wrapper write function for global state debugfs entries
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to write value from
- * @cnt: The maximum number of bytes to write
- * @ppos: The current "file" position
- * @entry: The entry to write to
- *
- * This function provides a generic write implementation for the global state
- * "data" structure debugfs filesystem entries. It would be nice to use
- * simple_attr_write directly, but we need to make sure that the data.lock
- * is held during the actual write.
- */
-static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos, u64 *entry)
-{
- char buf[U64STR_SIZE];
- int csize = min(cnt, sizeof(buf));
- u64 val = 0;
- int err = 0;
-
- memset(buf, '\0', sizeof(buf));
- if (copy_from_user(buf, ubuf, csize))
- return -EFAULT;
-
- buf[U64STR_SIZE-1] = '\0'; /* just in case */
- err = kstrtoull(buf, 10, &val);
- if (err)
- return -EINVAL;
-
- mutex_lock(&data.lock);
- *entry = val;
- mutex_unlock(&data.lock);
-
- return csize;
-}
-
-/**
- * debug_count_fopen - Open function for "count" debugfs entry
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "count" debugfs
- * interface to the hardware latency detector.
- */
-static int debug_count_fopen(struct inode *inode, struct file *filp)
-{
- return 0;
-}
-
-/**
- * debug_count_fread - Read function for "count" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "count" debugfs
- * interface to the hardware latency detector. Can be used to read the
- * number of latency readings exceeding the configured threshold since
- * the detector was last reset (e.g. by writing a zero into "count").
- */
-static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
-}
-
-/**
- * debug_count_fwrite - Write function for "count" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "count" debugfs
- * interface to the hardware latency detector. Can be used to write a
- * desired value, especially to zero the total count.
- */
-static ssize_t debug_count_fwrite(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
-}
-
-/**
- * debug_enable_fopen - Dummy open function for "enable" debugfs interface
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "enable" debugfs
- * interface to the hardware latency detector.
- */
-static int debug_enable_fopen(struct inode *inode, struct file *filp)
-{
- return 0;
-}
-
-/**
- * debug_enable_fread - Read function for "enable" debugfs interface
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "enable" debugfs
- * interface to the hardware latency detector. Can be used to determine
- * whether the detector is currently enabled ("0\n" or "1\n" returned).
- */
-static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- char buf[4];
-
- if ((cnt < sizeof(buf)) || (*ppos))
- return 0;
-
- buf[0] = enabled ? '1' : '0';
- buf[1] = '\n';
- buf[2] = '\0';
- if (copy_to_user(ubuf, buf, strlen(buf)))
- return -EFAULT;
- return *ppos = strlen(buf);
-}
-
-/**
- * debug_enable_fwrite - Write function for "enable" debugfs interface
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "enable" debugfs
- * interface to the hardware latency detector. Can be used to enable or
- * disable the detector, which will have the side-effect of possibly
- * also resetting the global stats and kicking off the measuring
- * kthread (on an enable) or the converse (upon a disable).
- */
-static ssize_t debug_enable_fwrite(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- char buf[4];
- int csize = min(cnt, sizeof(buf));
- long val = 0;
- int err = 0;
-
- memset(buf, '\0', sizeof(buf));
- if (copy_from_user(buf, ubuf, csize))
- return -EFAULT;
-
- buf[sizeof(buf)-1] = '\0'; /* just in case */
- err = kstrtoul(buf, 10, &val);
- if (0 != err)
- return -EINVAL;
-
- if (val) {
- if (enabled)
- goto unlock;
- enabled = 1;
- __reset_stats();
- if (start_kthread())
- return -EFAULT;
- } else {
- if (!enabled)
- goto unlock;
- enabled = 0;
- err = stop_kthread();
- if (err) {
- pr_err(BANNER "cannot stop kthread\n");
- return -EFAULT;
- }
- wake_up(&data.wq); /* reader(s) should return */
- }
-unlock:
- return csize;
-}
-
-/**
- * debug_max_fopen - Open function for "max" debugfs entry
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "max" debugfs
- * interface to the hardware latency detector.
- */
-static int debug_max_fopen(struct inode *inode, struct file *filp)
-{
- return 0;
-}
-
-/**
- * debug_max_fread - Read function for "max" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "max" debugfs
- * interface to the hardware latency detector. Can be used to determine
- * the maximum latency value observed since it was last reset.
- */
-static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
-}
-
-/**
- * debug_max_fwrite - Write function for "max" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "max" debugfs
- * interface to the hardware latency detector. Can be used to reset the
- * maximum or set it to some other desired value - if, then, subsequent
- * measurements exceed this value, the maximum will be updated.
- */
-static ssize_t debug_max_fwrite(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
-}
-
-
-/**
- * debug_sample_fopen - An open function for "sample" debugfs interface
- * @inode: The in-kernel inode representation of this debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function handles opening the "sample" file within the hardware
- * latency detector debugfs directory interface. This file is used to read
- * raw samples from the global ring_buffer and allows the user to see a
- * running latency history. It can be opened blocking or non-blocking,
- * which determines whether reads block waiting for new samples.
- * Implements simple locking to prevent multiple simultaneous use.
- */
-static int debug_sample_fopen(struct inode *inode, struct file *filp)
-{
- if (!atomic_add_unless(&data.sample_open, 1, 1))
- return -EBUSY;
- else
- return 0;
-}
-
-/**
- * debug_sample_fread - A read function for "sample" debugfs interface
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that will contain the samples read
- * @cnt: The maximum bytes to read from the debugfs "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function handles reading from the "sample" file within the hardware
- * latency detector debugfs directory interface. This file is used to read
- * raw samples from the global ring_buffer and allows the user to see a
- * running latency history. By default this will block pending a new
- * value written into the sample buffer, unless there are already a
- * number of value(s) waiting in the buffer, or the sample file was
- * previously opened in a non-blocking mode of operation.
- */
-static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- int len = 0;
- char buf[64];
- struct sample *sample = NULL;
-
- if (!enabled)
- return 0;
-
- sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
- if (!sample)
- return -ENOMEM;
-
- while (!buffer_get_sample(sample)) {
-
- DEFINE_WAIT(wait);
-
- if (filp->f_flags & O_NONBLOCK) {
- len = -EAGAIN;
- goto out;
- }
-
- prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
- schedule();
- finish_wait(&data.wq, &wait);
-
- if (signal_pending(current)) {
- len = -EINTR;
- goto out;
- }
-
- if (!enabled) { /* enable was toggled */
- len = 0;
- goto out;
- }
- }
-
- len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
- sample->timestamp.tv_sec,
- sample->timestamp.tv_nsec,
- sample->duration,
- sample->outer_duration);
-
-
- /* handling partial reads is more trouble than it's worth */
- if (len > cnt)
- goto out;
-
- if (copy_to_user(ubuf, buf, len))
- len = -EFAULT;
-
-out:
- kfree(sample);
- return len;
-}
-
-/**
- * debug_sample_release - Release function for "sample" debugfs interface
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function completes the close of the debugfs interface "sample" file.
- * Frees the sample_open "lock" so that other users may open the interface.
- */
-static int debug_sample_release(struct inode *inode, struct file *filp)
-{
- atomic_dec(&data.sample_open);
-
- return 0;
-}
-
-/**
- * debug_threshold_fopen - Open function for "threshold" debugfs entry
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "threshold" debugfs
- * interface to the hardware latency detector.
- */
-static int debug_threshold_fopen(struct inode *inode, struct file *filp)
-{
- return 0;
-}
-
-/**
- * debug_threshold_fread - Read function for "threshold" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "threshold" debugfs
- * interface to the hardware latency detector. It can be used to determine
- * the current threshold level at which a latency will be recorded in the
- * global ring buffer, typically on the order of 10us.
- */
-static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
-}
-
-/**
- * debug_threshold_fwrite - Write function for "threshold" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "threshold" debugfs
- * interface to the hardware latency detector. It can be used to configure
- * the threshold level at which any subsequently detected latencies will
- * be recorded into the global ring buffer.
- */
-static ssize_t debug_threshold_fwrite(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- int ret;
-
- ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
-
- if (enabled)
- wake_up_process(kthread);
-
- return ret;
-}
-
-/**
- * debug_width_fopen - Open function for "width" debugfs entry
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "width" debugfs
- * interface to the hardware latency detector.
- */
-static int debug_width_fopen(struct inode *inode, struct file *filp)
-{
- return 0;
-}
-
-/**
- * debug_width_fread - Read function for "width" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "width" debugfs
- * interface to the hardware latency detector. It can be used to determine
- * for how many us of the total sampling window we will actively sample
- * for any hardware-induced latency periods. Obviously, it is not
- * possible to sample constantly and still have the system respond to a
- * sample reader, or, worse, appear to have gone out to lunch.
- */
-static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
-}
-
-/**
- * debug_width_fwrite - Write function for "width" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "width" debugfs
- * interface to the hardware latency detector. It can be used to configure
- * for how many us of the total sampling window we will actively sample
- * for any hardware-induced latency periods. Obviously, it is not
- * possible to sample constantly and still have the system respond to a
- * sample reader, or, worse, appear to have gone out to lunch. It is
- * enforced that the width is less than the total window size.
- */
-static ssize_t debug_width_fwrite(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- char buf[U64STR_SIZE];
- int csize = min(cnt, sizeof(buf));
- u64 val = 0;
- int err = 0;
-
- memset(buf, '\0', sizeof(buf));
- if (copy_from_user(buf, ubuf, csize))
- return -EFAULT;
-
- buf[U64STR_SIZE-1] = '\0'; /* just in case */
- err = kstrtoull(buf, 10, &val);
- if (0 != err)
- return -EINVAL;
-
- mutex_lock(&data.lock);
- if (val < data.sample_window)
- data.sample_width = val;
- else {
- mutex_unlock(&data.lock);
- return -EINVAL;
- }
- mutex_unlock(&data.lock);
-
- if (enabled)
- wake_up_process(kthread);
-
- return csize;
-}
-
-/**
- * debug_window_fopen - Open function for "window" debugfs entry
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "window" debugfs
- * interface to the hardware latency detector. The window is the total time
- * in us that will be considered one sample period. Conceptually, windows
- * occur back-to-back and contain a sample width period during which
- * actual sampling occurs.
- */
-static int debug_window_fopen(struct inode *inode, struct file *filp)
-{
- return 0;
-}
-
-/**
- * debug_window_fread - Read function for "window" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "window" debugfs
- * interface to the hardware latency detector. The window is the total time
- * in us that will be considered one sample period. Conceptually, windows
- * occur back-to-back and contain a sample width period during which
- * actual sampling occurs. Can be used to read the total window size.
- */
-static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
-}
-
-/**
- * debug_window_fwrite - Write function for "window" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "window" debugfs
- * interface to the hardware latency detector. The window is the total time
- * in us that will be considered one sample period. Conceptually, windows
- * occur back-to-back and contain a sample width period during which
- * actual sampling occurs. Can be used to write a new total window size. It
- * is enforced that any value written must be greater than the sample width
- * size, or an error results.
- */
-static ssize_t debug_window_fwrite(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- char buf[U64STR_SIZE];
- int csize = min(cnt, sizeof(buf));
- u64 val = 0;
- int err = 0;
-
- memset(buf, '\0', sizeof(buf));
- if (copy_from_user(buf, ubuf, csize))
- return -EFAULT;
-
- buf[U64STR_SIZE-1] = '\0'; /* just in case */
- err = kstrtoull(buf, 10, &val);
- if (0 != err)
- return -EINVAL;
-
- mutex_lock(&data.lock);
- if (data.sample_width < val)
- data.sample_window = val;
- else {
- mutex_unlock(&data.lock);
- return -EINVAL;
- }
- mutex_unlock(&data.lock);
-
- return csize;
-}
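
The two write handlers above share one invariant: whichever value is updated, the sampling width must stay strictly below the sampling window. A minimal stand-alone sketch of that validation (the struct and function names here are illustrative, not from the module):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct sample_cfg {
	uint64_t sample_window;	/* total period, in us */
	uint64_t sample_width;	/* active sampling time, in us */
};

/* Mirrors debug_width_fwrite(): a width at or above the window is rejected. */
static int set_width(struct sample_cfg *cfg, uint64_t val)
{
	if (val >= cfg->sample_window)
		return -EINVAL;
	cfg->sample_width = val;
	return 0;
}

/* Mirrors debug_window_fwrite(): a window at or below the width is rejected. */
static int set_window(struct sample_cfg *cfg, uint64_t val)
{
	if (val <= cfg->sample_width)
		return -EINVAL;
	cfg->sample_window = val;
	return 0;
}

int main(void)
{
	struct sample_cfg cfg = { .sample_window = 1000000, .sample_width = 500000 };

	printf("set_width(999999)  -> %d\n", set_width(&cfg, 999999));	/* 0 */
	printf("set_window(999998) -> %d\n", set_window(&cfg, 999998));	/* -EINVAL */
	return 0;
}
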
-
-/*
- * Function pointers for the "count" debugfs file operations
- */
-static const struct file_operations count_fops = {
- .open = debug_count_fopen,
- .read = debug_count_fread,
- .write = debug_count_fwrite,
- .owner = THIS_MODULE,
-};
-
-/*
- * Function pointers for the "enable" debugfs file operations
- */
-static const struct file_operations enable_fops = {
- .open = debug_enable_fopen,
- .read = debug_enable_fread,
- .write = debug_enable_fwrite,
- .owner = THIS_MODULE,
-};
-
-/*
- * Function pointers for the "max" debugfs file operations
- */
-static const struct file_operations max_fops = {
- .open = debug_max_fopen,
- .read = debug_max_fread,
- .write = debug_max_fwrite,
- .owner = THIS_MODULE,
-};
-
-/*
- * Function pointers for the "sample" debugfs file operations
- */
-static const struct file_operations sample_fops = {
- .open = debug_sample_fopen,
- .read = debug_sample_fread,
- .release = debug_sample_release,
- .owner = THIS_MODULE,
-};
-
-/*
- * Function pointers for the "threshold" debugfs file operations
- */
-static const struct file_operations threshold_fops = {
- .open = debug_threshold_fopen,
- .read = debug_threshold_fread,
- .write = debug_threshold_fwrite,
- .owner = THIS_MODULE,
-};
-
-/*
- * Function pointers for the "width" debugfs file operations
- */
-static const struct file_operations width_fops = {
- .open = debug_width_fopen,
- .read = debug_width_fread,
- .write = debug_width_fwrite,
- .owner = THIS_MODULE,
-};
-
-/*
- * Function pointers for the "window" debugfs file operations
- */
-static const struct file_operations window_fops = {
- .open = debug_window_fopen,
- .read = debug_window_fread,
- .write = debug_window_fwrite,
- .owner = THIS_MODULE,
-};
-
-/**
- * init_debugfs - A function to initialize the debugfs interface files
- *
- * This function creates entries in debugfs for "hwlat_detector", including
- * files to read values from the detector, current samples, and the
- * maximum sample that has been captured since the hardware latency
- * detector was started.
- */
-static int init_debugfs(void)
-{
- int ret = -ENOMEM;
-
- debug_dir = debugfs_create_dir(DRVNAME, NULL);
- if (!debug_dir)
- goto err_debug_dir;
-
- debug_sample = debugfs_create_file("sample", 0444,
- debug_dir, NULL,
- &sample_fops);
- if (!debug_sample)
- goto err_sample;
-
- debug_count = debugfs_create_file("count", 0444,
- debug_dir, NULL,
- &count_fops);
- if (!debug_count)
- goto err_count;
-
- debug_max = debugfs_create_file("max", 0444,
- debug_dir, NULL,
- &max_fops);
- if (!debug_max)
- goto err_max;
-
- debug_sample_window = debugfs_create_file("window", 0644,
- debug_dir, NULL,
- &window_fops);
- if (!debug_sample_window)
- goto err_window;
-
- debug_sample_width = debugfs_create_file("width", 0644,
- debug_dir, NULL,
- &width_fops);
- if (!debug_sample_width)
- goto err_width;
-
- debug_threshold = debugfs_create_file("threshold", 0644,
- debug_dir, NULL,
- &threshold_fops);
- if (!debug_threshold)
- goto err_threshold;
-
- debug_enable = debugfs_create_file("enable", 0644,
- debug_dir, &enabled,
- &enable_fops);
- if (!debug_enable)
- goto err_enable;
-
- ret = 0;
- goto out;
-
-err_enable:
- debugfs_remove(debug_threshold);
-err_threshold:
- debugfs_remove(debug_sample_width);
-err_width:
- debugfs_remove(debug_sample_window);
-err_window:
- debugfs_remove(debug_max);
-err_max:
- debugfs_remove(debug_count);
-err_count:
- debugfs_remove(debug_sample);
-err_sample:
- debugfs_remove(debug_dir);
-err_debug_dir:
-out:
- return ret;
-}
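
init_debugfs() above uses the classic goto-unwind ladder: each failure label tears down exactly the entries created before it, in reverse order of creation. A stand-alone model of the shape (hypothetical resources, plain malloc/free in place of debugfs entries):

#include <stdio.h>
#include <stdlib.h>

static void *a, *b, *c;

static int init_three(void)
{
	a = malloc(16);
	if (!a)
		goto err_a;
	b = malloc(16);
	if (!b)
		goto err_b;
	c = malloc(16);
	if (!c)
		goto err_c;
	return 0;	/* success: all three stay allocated for the caller */

err_c:
	free(b);
err_b:
	free(a);
err_a:
	return -1;
}

int main(void)
{
	printf("init_three() -> %d\n", init_three());
	return 0;
}
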
-
-/**
- * free_debugfs - A function to cleanup the debugfs file interface
- */
-static void free_debugfs(void)
-{
- /* could also use a debugfs_remove_recursive */
- debugfs_remove(debug_enable);
- debugfs_remove(debug_threshold);
- debugfs_remove(debug_sample_width);
- debugfs_remove(debug_sample_window);
- debugfs_remove(debug_max);
- debugfs_remove(debug_count);
- debugfs_remove(debug_sample);
- debugfs_remove(debug_dir);
-}
-
-/**
- * detector_init - Standard module initialization code
- */
-static int detector_init(void)
-{
- int ret = -ENOMEM;
-
- pr_info(BANNER "version %s\n", VERSION);
-
- ret = init_stats();
- if (0 != ret)
- goto out;
-
- ret = init_debugfs();
- if (0 != ret)
- goto err_stats;
-
- if (enabled)
- ret = start_kthread();
-
- goto out;
-
-err_stats:
- ring_buffer_free(ring_buffer);
-out:
- return ret;
-
-}
-
-/**
- * detector_exit - Standard module cleanup code
- */
-static void detector_exit(void)
-{
- int err;
-
- if (enabled) {
- enabled = 0;
- err = stop_kthread();
- if (err)
- pr_err(BANNER "cannot stop kthread\n");
- }
-
- free_debugfs();
- ring_buffer_free(ring_buffer); /* free up the ring buffer */
-
-}
-
-module_init(detector_init);
-module_exit(detector_exit);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index a0f63c9..c3785ed 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1023,12 +1023,15 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
struct sg_mapping_iter *sg_miter = &host->sg_miter;
struct variant_data *variant = host->variant;
void __iomem *base = host->base;
+ unsigned long flags;
u32 status;
status = readl(base + MMCISTATUS);
dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
+ local_irq_save(flags);
+
do {
unsigned int remain, len;
char *buffer;
@@ -1068,6 +1071,8 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
sg_miter_stop(sg_miter);
+ local_irq_restore(flags);
+
/*
* If we have less than the fifo 'half-full' threshold to transfer,
* trigger a PIO interrupt as soon as any data is available.
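
The hunk above re-disables interrupts around the PIO scatter-gather walk: with SG_MITER_ATOMIC the iterator maps pages with kmap_atomic(), so the walk, including sg_miter_stop(), must not be preempted or migrated. A hedged kernel-style sketch of the pattern (demo_drain_sg is illustrative, not driver code):

#include <linux/irqflags.h>
#include <linux/scatterlist.h>

static void demo_drain_sg(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;
	unsigned long flags;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	local_irq_save(flags);
	while (sg_miter_next(&miter)) {
		/* miter.addr / miter.length map one chunk at a time */
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
}
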
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7bbdf63..b45b240 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -160,7 +160,6 @@ config VXLAN
config NETCONSOLE
tristate "Network console logging support"
- depends on !PREEMPT_RT_FULL
---help---
If you want to log kernel messages over the network, enable this.
See <file:Documentation/networking/netconsole.txt> for details.
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f428ef57..71adb69 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -694,7 +694,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
client_info->ntt = 0;
}
- if (!vlan_get_tag(skb, &client_info->vlan_id))
+ if (vlan_get_tag(skb, &client_info->vlan_id))
client_info->vlan_id = 0;
if (!client_info->assigned) {
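
The one-character fix above hinges on vlan_get_tag()'s convention: it returns 0 when a tag was found and a negative error otherwise, so the correct logic clears vlan_id on a nonzero return. A tiny stand-alone model (fake_vlan_get_tag stands in for the real helper):

#include <errno.h>
#include <stdio.h>

/* Stand-in for vlan_get_tag(): 0 on success, -EINVAL if untagged. */
static int fake_vlan_get_tag(int tagged, unsigned short *vlan_id)
{
	if (!tagged)
		return -EINVAL;
	*vlan_id = 42;
	return 0;
}

int main(void)
{
	unsigned short vlan_id;

	/* The corrected test: nonzero return means "no tag", so clear the id. */
	if (fake_vlan_get_tag(0, &vlan_id))
		vlan_id = 0;
	printf("untagged frame -> vlan_id %u\n", vlan_id);
	return 0;
}
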
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 55dd320..ad5272b 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -842,9 +842,9 @@ static void poll_vortex(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
unsigned long flags;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
}
#endif
@@ -1917,12 +1917,12 @@ static void vortex_tx_timeout(struct net_device *dev)
* Block interrupts because vortex_interrupt does a bare spin_lock()
*/
unsigned long flags;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
if (vp->full_bus_master_tx)
boomerang_interrupt(dev->irq, dev);
else
vortex_interrupt(dev->irq, dev);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
}
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 34ebd1b..a36a760 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2206,7 +2206,11 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
}
tpd_req = atl1c_cal_tpd_req(skb);
- spin_lock_irqsave(&adapter->tx_lock, flags);
+ if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
+ if (netif_msg_pktdata(adapter))
+ dev_info(&adapter->pdev->dev, "tx locked\n");
+ return NETDEV_TX_LOCKED;
+ }
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* not enough descriptors, just stop the queue */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index d398960..1966444 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1838,7 +1838,8 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
tpd_req = atl1e_cal_tdp_req(skb);
- spin_lock_irqsave(&adapter->tx_lock, flags);
+ if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
+ return NETDEV_TX_LOCKED;
if (atl1e_tpd_avail(adapter) < tpd_req) {
/* not enough descriptors, just stop the queue */
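
This hunk, like several below (cxgb, s2io, pch_gbe, tehuti, rionet), restores the trylock idiom in the xmit path: if the TX lock is contended, the driver returns NETDEV_TX_LOCKED and lets the core requeue the skb rather than spinning. A hedged kernel-style sketch (demo_xmit is illustrative, not one of the drivers):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev,
			     spinlock_t *tx_lock)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(tx_lock, flags))
		return NETDEV_TX_LOCKED;	/* core will retry later */

	/* ... post skb to the hardware ring here ... */

	spin_unlock_irqrestore(tx_lock, flags);
	return NETDEV_TX_OK;
}
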
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e838a3f..8f9e76d 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2490,6 +2490,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
bp->fw_wr_seq++;
msg_data |= bp->fw_wr_seq;
+ bp->fw_last_msg = msg_data;
bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
@@ -3982,8 +3983,23 @@ bnx2_setup_wol(struct bnx2 *bp)
wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
}
- if (!(bp->flags & BNX2_FLAG_NO_WOL))
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
+ if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
+ u32 val;
+
+ wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
+ if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
+ bnx2_fw_sync(bp, wol_msg, 1, 0);
+ return;
+ }
+ /* Tell firmware not to power down the PHY yet, otherwise
+ * the chip will take a long time to respond to MMIO reads.
+ */
+ val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
+ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
+ val | BNX2_PORT_FEATURE_ASF_ENABLED);
+ bnx2_fw_sync(bp, wol_msg, 1, 0);
+ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
+ }
}
@@ -4015,9 +4031,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
if (bp->wol)
pci_set_power_state(bp->pdev, PCI_D3hot);
- } else {
- pci_set_power_state(bp->pdev, PCI_D3hot);
+ break;
+
+ }
+ if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+ u32 val;
+
+ /* Tell firmware not to power down the PHY yet,
+ * otherwise the other port may not respond to
+ * MMIO reads.
+ */
+ val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+ val &= ~BNX2_CONDITION_PM_STATE_MASK;
+ val |= BNX2_CONDITION_PM_STATE_UNPREP;
+ bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
}
+ pci_set_power_state(bp->pdev, PCI_D3hot);
/* No more memory access after this point until
* device is brought back to D0.
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 18cb2d2..0eb2a65 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6890,6 +6890,7 @@ struct bnx2 {
u16 fw_wr_seq;
u16 fw_drv_pulse_wr_seq;
+ u32 fw_last_msg;
int rx_max_ring;
int rx_ring_size;
@@ -7396,6 +7397,10 @@ struct bnx2_rv2p_fw_file {
#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
+#define BNX2_CONDITION_PM_STATE_MASK 0x00030000
+#define BNX2_CONDITION_PM_STATE_FULL 0x00030000
+#define BNX2_CONDITION_PM_STATE_PREP 0x00020000
+#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000
#define BNX2_BC_STATE_DEBUG_CMD 0x1dc
#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
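
The new PM_STATE defines above form a two-bit field inside the BC_STATE_CONDITION word, updated with the usual clear-then-set read-modify-write seen in bnx2_set_power_state(). A stand-alone check of that arithmetic (the sample readback value is made up):

#include <stdint.h>
#include <stdio.h>

#define BNX2_CONDITION_PM_STATE_MASK	0x00030000
#define BNX2_CONDITION_PM_STATE_UNPREP	0x00010000

int main(void)
{
	/* Pretend shmem readback: FULL state plus unrelated bits set. */
	uint32_t val = 0x0003e00c;

	val &= ~BNX2_CONDITION_PM_STATE_MASK;
	val |= BNX2_CONDITION_PM_STATE_UNPREP;
	printf("condition word: 0x%08x\n", (unsigned int)val);	/* 0x0001e00c */
	return 0;
}
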
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 14a50a1..aae7ba6 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17480,8 +17480,6 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_init_bufmgr_config(tp);
- features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
@@ -17513,7 +17511,8 @@ static int tg3_init_one(struct pci_dev *pdev,
features |= NETIF_F_TSO_ECN;
}
- dev->features |= features;
+ dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features |= features;
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index bfe5c72..8061fb0 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1665,7 +1665,8 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
- spin_lock(&q->lock);
+ if (!spin_trylock(&q->lock))
+ return NETDEV_TX_LOCKED;
reclaim_completed_tx(sge, q);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 7565b99..7790160 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,8 +1939,8 @@ static void tulip_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
pci_release_regions (pdev);
- pci_disable_device(pdev);
pci_set_drvdata (pdev, NULL);
+ pci_disable_device(pdev);
/* pci_power_off (pdev, -1); */
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 63090c0..8672547 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -525,13 +525,6 @@ fec_restart(struct net_device *ndev, int duplex)
/* Clear any outstanding interrupt. */
writel(0xffc00000, fep->hwp + FEC_IEVENT);
- /* Setup multicast filter. */
- set_multicast_list(ndev);
-#ifndef CONFIG_M5272
- writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
/* Set maximum receive buffer size. */
writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
@@ -652,6 +645,13 @@ fec_restart(struct net_device *ndev, int duplex)
writel(rcntl, fep->hwp + FEC_R_CNTRL);
+ /* Setup multicast filter. */
+ set_multicast_list(ndev);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= (1 << 8);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 091945c..9fbe4dd 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -134,6 +134,7 @@ static int gfar_poll_sq(struct napi_struct *napi, int budget);
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
@@ -1300,7 +1301,7 @@ static int gfar_suspend(struct device *dev)
if (netif_running(ndev)) {
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -1318,7 +1319,7 @@ static int gfar_suspend(struct device *dev)
unlock_rx_qs(priv);
unlock_tx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
disable_napi(priv);
@@ -1360,7 +1361,7 @@ static int gfar_resume(struct device *dev)
/* Disable Magic Packet mode, in case something
* else woke us up.
*/
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -1372,7 +1373,7 @@ static int gfar_resume(struct device *dev)
unlock_rx_qs(priv);
unlock_tx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
netif_device_attach(ndev);
@@ -1700,7 +1701,7 @@ void stop_gfar(struct net_device *dev)
/* Lock it down */
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -1708,7 +1709,7 @@ void stop_gfar(struct net_device *dev)
unlock_rx_qs(priv);
unlock_tx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
/* Free the IRQs */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -2386,7 +2387,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
u32 tempval;
regs = priv->gfargrp[0].regs;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_rx_qs(priv);
if (features & NETIF_F_HW_VLAN_CTAG_TX) {
@@ -2419,7 +2420,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
gfar_change_mtu(dev, dev->mtu);
unlock_rx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
@@ -2515,7 +2516,7 @@ static void gfar_align_skb(struct sk_buff *skb)
}
/* Interrupt Handler for Transmit complete */
-static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
+static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
struct net_device *dev = tx_queue->dev;
struct netdev_queue *txq;
@@ -2938,14 +2939,10 @@ static int gfar_poll(struct napi_struct *napi, int budget)
tx_queue = priv->tx_queue[i];
/* run Tx cleanup to completion */
if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
- int ret;
-
- ret = gfar_clean_tx_ring(tx_queue);
- if (ret)
- has_tx_work++;
+ gfar_clean_tx_ring(tx_queue);
+ has_tx_work = 1;
}
}
- work_done += has_tx_work;
for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
/* skip queue if not active */
@@ -3104,7 +3101,7 @@ static void adjust_link(struct net_device *dev)
struct phy_device *phydev = priv->phydev;
int new_state = 0;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_tx_qs(priv);
if (phydev->link) {
@@ -3178,7 +3175,7 @@ static void adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
unlock_tx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
}
/* Update the hash table based on the current list of multicast
@@ -3384,14 +3381,14 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
dev->stats.tx_dropped++;
atomic64_inc(&priv->extra_stats.tx_underrun);
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_tx_qs(priv);
/* Reactivate the Tx Queues */
gfar_write(&regs->tstat, gfargrp->tstat);
unlock_tx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
}
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 95a1f62..d3d7ede 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -501,7 +501,7 @@ static int gfar_sringparam(struct net_device *dev,
/* Halt TX and RX, and process the frames which
* have already been received
*/
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -509,7 +509,7 @@ static int gfar_sringparam(struct net_device *dev,
unlock_rx_qs(priv);
unlock_tx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
for (i = 0; i < priv->num_rx_queues; i++)
gfar_clean_rx_ring(priv->rx_queue[i],
@@ -624,7 +624,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
/* Halt TX and RX, and process the frames which
* have already been received
*/
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -632,7 +632,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
unlock_tx_qs(priv);
unlock_rx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
for (i = 0; i < priv->num_rx_queues; i++)
gfar_clean_rx_ring(priv->rx_queue[i],
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
index f0160e67..acb55af 100644
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
@@ -68,7 +68,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
return count;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_rx_qs(priv);
/* Set the new stashing value */
@@ -84,7 +84,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
gfar_write(&regs->attr, temp);
unlock_rx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return count;
}
@@ -112,7 +112,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
return count;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_rx_qs(priv);
if (length > priv->rx_buffer_size)
@@ -140,7 +140,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
out:
unlock_rx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return count;
}
@@ -171,7 +171,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
return count;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_rx_qs(priv);
if (index > priv->rx_stash_size)
@@ -189,7 +189,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
out:
unlock_rx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return count;
}
@@ -219,7 +219,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
if (length > GFAR_MAX_FIFO_THRESHOLD)
return count;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_tx_qs(priv);
priv->fifo_threshold = length;
@@ -230,7 +230,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
gfar_write(&regs->fifo_tx_thr, temp);
unlock_tx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return count;
}
@@ -259,7 +259,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
if (num > GFAR_MAX_FIFO_STARVE)
return count;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_tx_qs(priv);
priv->fifo_starve = num;
@@ -270,7 +270,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
gfar_write(&regs->fifo_tx_starve, temp);
unlock_tx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return count;
}
@@ -300,7 +300,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
if (num > GFAR_MAX_FIFO_STARVE_OFF)
return count;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
lock_tx_qs(priv);
priv->fifo_starve_off = num;
@@ -311,7 +311,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
gfar_write(&regs->fifo_tx_starve_shutoff, temp);
unlock_tx_qs(priv);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return count;
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 5d41aee..6c0fd8e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -523,10 +523,21 @@ retry:
return rc;
}
+static u64 ibmveth_encode_mac_addr(u8 *mac)
+{
+ int i;
+ u64 encoded = 0;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ encoded = (encoded << 8) | mac[i];
+
+ return encoded;
+}
+
static int ibmveth_open(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
- u64 mac_address = 0;
+ u64 mac_address;
int rxq_entries = 1;
unsigned long lpar_rc;
int rc;
@@ -580,8 +591,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
- mac_address = mac_address >> 16;
+ mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
adapter->rx_queue.queue_len;
@@ -1184,8 +1194,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
/* add the addresses to the filter table */
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
- unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
+ u64 mcast_addr;
+ mcast_addr = ibmveth_encode_mac_addr(ha->addr);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1369,9 +1379,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
- adapter->mac_addr = 0;
- memcpy(&adapter->mac_addr, mac_addr_p, 6);
-
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
@@ -1380,7 +1387,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features |= netdev->hw_features;
- memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+ memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 84066ba..2c636cb 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -139,7 +139,6 @@ struct ibmveth_adapter {
struct napi_struct napi;
struct net_device_stats stats;
unsigned int mcastFilterSize;
- unsigned long mac_addr;
void * buffer_list_addr;
void * filter_list_addr;
dma_addr_t buffer_list_dma;
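
ibmveth_encode_mac_addr() replaces a memcpy into a u64 that silently depended on host byte order: it shifts the six bytes in explicitly, so the low 48 bits always hold the MAC in network order. A stand-alone check:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static uint64_t encode_mac(const uint8_t *mac)
{
	uint64_t encoded = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];
	return encoded;
}

int main(void)
{
	const uint8_t mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* Prints 0x001122334455 on any host, regardless of endianness. */
	printf("0x%012llx\n", (unsigned long long)encode_mac(mac));
	return 0;
}
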
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada6e21..48f0b06 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3036,7 +3036,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = false;
}
- pci_disable_device(pdev);
+ pci_clear_master(pdev);
}
static int __e100_power_off(struct pci_dev *pdev, bool wake)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 4ef7867..9cb400c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2976,11 +2976,21 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
u32 rctl, rfctl;
u32 pages = 0;
- /* Workaround Si errata on PCHx - configure jumbo frame flow */
- if ((hw->mac.type >= e1000_pch2lan) &&
- (adapter->netdev->mtu > ETH_DATA_LEN) &&
- e1000_lv_jumbo_workaround_ich8lan(hw, true))
- e_dbg("failed to enable jumbo frame workaround mode\n");
+ /* Workaround Si errata on PCHx - configure jumbo frame flow.
+ * If jumbo frames are not enabled, program the related
+ * MAC/PHY registers back to their h/w defaults.
+ */
+ if (hw->mac.type >= e1000_pch2lan) {
+ s32 ret_val;
+
+ if (adapter->netdev->mtu > ETH_DATA_LEN)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+ if (ret_val)
+ e_dbg("failed to enable|disable jumbo frame workaround mode\n");
+ }
/* Program MC offset vector base */
rctl = er32(RCTL);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e35bac7..71d9cad 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -88,8 +88,9 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
-#define MVNETA_SGMII_SERDES_CFG 0x24A0
+#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
+#define MVNETA_RGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -121,7 +122,7 @@
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
-#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
+#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
@@ -665,35 +666,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
-
-
-/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
-static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
- if (enable)
- val |= MVNETA_GMAC2_PORT_RGMII;
- else
- val &= ~MVNETA_GMAC2_PORT_RGMII;
-
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-}
-
-/* Config SGMII port */
-static void mvneta_port_sgmii_config(struct mvneta_port *pp)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val |= MVNETA_GMAC2_PSC_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-
- mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-}
-
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
@@ -2723,12 +2695,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
if (phy_mode == PHY_INTERFACE_MODE_SGMII)
- mvneta_port_sgmii_config(pp);
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+ else
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
- mvneta_gmac_rgmii_set(pp, 1);
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+
+ val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
/* Cancel Port Reset */
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
val &= ~MVNETA_GMAC2_PORT_RESET;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 727b546a..e0c92e0 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -23,6 +23,7 @@
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -83,6 +84,7 @@ union ks8851_tx_hdr {
* @rc_rxqcr: Cached copy of KS_RXQCR.
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
* @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
+ * @vdd_reg: Optional regulator supplying the chip
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -130,6 +132,7 @@ struct ks8851_net {
struct spi_transfer spi_xfer2[2];
struct eeprom_93cx6 eeprom;
+ struct regulator *vdd_reg;
};
static int msg_enable;
@@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi)
ks->spidev = spi;
ks->tx_space = 6144;
+ ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
+ if (IS_ERR(ks->vdd_reg)) {
+ ret = PTR_ERR(ks->vdd_reg);
+ if (ret == -EPROBE_DEFER)
+ goto err_reg;
+ } else {
+ ret = regulator_enable(ks->vdd_reg);
+ if (ret) {
+ dev_err(&spi->dev, "regulator enable fail: %d\n",
+ ret);
+ goto err_reg_en;
+ }
+ }
+
mutex_init(&ks->lock);
spin_lock_init(&ks->statelock);
@@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi)
err_netdev:
free_irq(ndev->irq, ks);
-err_id:
err_irq:
+err_id:
+ if (!IS_ERR(ks->vdd_reg))
+ regulator_disable(ks->vdd_reg);
+err_reg_en:
+ if (!IS_ERR(ks->vdd_reg))
+ regulator_put(ks->vdd_reg);
+err_reg:
free_netdev(ndev);
return ret;
}
@@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi)
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
+ if (!IS_ERR(priv->vdd_reg)) {
+ regulator_disable(priv->vdd_reg);
+ regulator_put(priv->vdd_reg);
+ }
free_netdev(priv->netdev);
return 0;
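
The ks8851 hunks follow the optional-regulator idiom: regulator_get_optional() hands back an ERR_PTR when no supply is described, only -EPROBE_DEFER is fatal, and every later disable/put is gated on !IS_ERR(). A hedged kernel-style sketch (enable_optional_vdd is illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int enable_optional_vdd(struct device *dev, struct regulator **reg)
{
	int ret;

	*reg = regulator_get_optional(dev, "vdd");
	if (IS_ERR(*reg)) {
		/* Only probe deferral is fatal; a missing supply is fine.
		 * Keep the ERR_PTR so teardown can reuse the !IS_ERR() gate.
		 */
		ret = PTR_ERR(*reg);
		return ret == -EPROBE_DEFER ? ret : 0;
	}

	ret = regulator_enable(*reg);
	if (ret)
		regulator_put(*reg);	/* probe bails out entirely here */
	return ret;
}
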
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 809d92c..51b0094 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4089,7 +4089,12 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
- spin_lock_irqsave(&fifo->tx_lock, flags);
+ if (do_spin_lock)
+ spin_lock_irqsave(&fifo->tx_lock, flags);
+ else {
+ if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
+ return NETDEV_TX_LOCKED;
+ }
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index e732812..5a0f04c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2148,8 +2148,10 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
unsigned long flags;
- spin_lock_irqsave(&tx_ring->tx_lock, flags);
-
+ if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
+ /* Collision - tell upper layer to requeue */
+ return NETDEV_TX_LOCKED;
+ }
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index fefe07b..3ccedeb 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2213,7 +2213,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
- disable_irq_nosync(irq);
+ disable_irq(irq);
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 34d00f5..b6b601c 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -67,6 +67,9 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_RXQ_MIN_ENT 128U
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
+#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
+ EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
+
/* Filters */
/**
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 5b471cf..3b2356b 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -582,7 +582,7 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
- ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
ring->rx_pending = efx->rxq_entries;
ring->tx_pending = efx->txq_entries;
}
@@ -595,7 +595,7 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
- ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+ ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
return -EINVAL;
if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
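
EFX_TXQ_MAX_ENT() caps the user-visible TX ring size at half the DMA queue size when workaround 35388 is active, and both ethtool paths above now honor it. A stand-alone model (assuming the driver's EFX_MAX_DMAQ_SIZE of 4096 purely for illustration):

#include <stdio.h>

#define EFX_MAX_DMAQ_SIZE 4096U

/* Model of EFX_TXQ_MAX_ENT(): NICs needing the workaround get half. */
static unsigned int txq_max_ent(int has_workaround_35388)
{
	return has_workaround_35388 ? EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE;
}

int main(void)
{
	printf("plain: %u, workaround: %u\n", txq_max_ent(0), txq_max_ent(1));
	return 0;
}
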
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 7edf580..61a1540 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1629,8 +1629,13 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
unsigned long flags;
ENTER;
-
- spin_lock_irqsave(&priv->tx_lock, flags);
+ local_irq_save(flags);
+ if (!spin_trylock(&priv->tx_lock)) {
+ local_irq_restore(flags);
+ DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
+ BDX_DRV_NAME, ndev->name);
+ return NETDEV_TX_LOCKED;
+ }
/* build tx descriptor */
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 6a32ef9..2b0aab1 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1531,7 +1531,7 @@ static int emac_dev_open(struct net_device *ndev)
struct device *emac_dev = &ndev->dev;
u32 cnt;
struct resource *res;
- int ret;
+ int q, m, ret;
int i = 0;
int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
@@ -1566,8 +1566,7 @@ static int emac_dev_open(struct net_device *ndev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
- if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
- 0, ndev->name, ndev))
+ if (request_irq(i, emac_irq, 0, ndev->name, ndev))
goto rollback;
}
k++;
@@ -1640,7 +1639,15 @@ static int emac_dev_open(struct net_device *ndev)
rollback:
- dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
+ dev_err(emac_dev, "DaVinci EMAC: request_irq() failed");
+
+ for (q = k; k >= 0; k--) {
+ for (m = i; m >= res->start; m--)
+ free_irq(m, ndev);
+ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1);
+ m = res->end;
+ }
+
ret = -EBUSY;
err:
pm_runtime_put(&priv->pdev->dev);
@@ -1658,6 +1665,9 @@ err:
*/
static int emac_dev_stop(struct net_device *ndev)
{
+ struct resource *res;
+ int i = 0;
+ int irq_num;
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
@@ -1673,6 +1683,13 @@ static int emac_dev_stop(struct net_device *ndev)
if (priv->phydev)
phy_disconnect(priv->phydev);
+ /* Free IRQ */
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
+
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index aa9800e..6d1f6ed 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -174,7 +174,11 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned long flags;
int add_num = 1;
- spin_lock_irqsave(&rnet->tx_lock, flags);
+ local_irq_save(flags);
+ if (!spin_trylock(&rnet->tx_lock)) {
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
if (is_multicast_ether_addr(eth->h_dest))
add_num = nets[rnet->mport->id].nact;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index a91fa49..1d4da74 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -753,14 +753,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
DECLARE_WAITQUEUE(wait, current);
int temp;
/* ensure there are no more active urbs */
- add_wait_queue(&unlink_wakeup, &wait);
+ add_wait_queue(&dev->wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
- dev->wait = &unlink_wakeup;
temp = unlink_urbs(dev, &dev->txq) +
unlink_urbs(dev, &dev->rxq);
@@ -774,15 +772,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
"waited for %d urb completions\n", temp);
}
set_current_state(TASK_RUNNING);
- dev->wait = NULL;
- remove_wait_queue(&unlink_wakeup, &wait);
+ remove_wait_queue(&dev->wait, &wait);
}
int usbnet_stop (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct driver_info *info = dev->driver_info;
- int retval;
+ int retval, pm;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
@@ -792,6 +789,8 @@ int usbnet_stop (struct net_device *net)
net->stats.rx_packets, net->stats.tx_packets,
net->stats.rx_errors, net->stats.tx_errors);
+ /* avoid racing with resume */
+ pm = usb_autopm_get_interface(dev->intf);
/* allow minidriver to stop correctly (wireless devices to turn off
* radio etc) */
if (info->stop) {
@@ -818,6 +817,9 @@ int usbnet_stop (struct net_device *net)
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
+ if (!pm)
+ usb_autopm_put_interface(dev->intf);
+
if (info->manage_power &&
!test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
info->manage_power(dev, 0);
@@ -1438,11 +1440,12 @@ static void usbnet_bh (unsigned long param)
/* restart RX again after disabling due to high error rate */
clear_bit(EVENT_RX_KILL, &dev->flags);
- // waiting for all pending urbs to complete?
- if (dev->wait) {
- if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
- wake_up (dev->wait);
- }
+ /* Are we waiting for all pending urbs to complete?
+ * Only then can we forgo submitting anew.
+ */
+ if (waitqueue_active(&dev->wait)) {
+ if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
+ wake_up_all(&dev->wait);
// or are we maybe short a few urbs?
} else if (netif_running (dev->net) &&
@@ -1581,6 +1584,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->driver_name = name;
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
+ init_waitqueue_head(&dev->wait);
skb_queue_head_init (&dev->rxq);
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
@@ -1792,9 +1796,10 @@ int usbnet_resume (struct usb_interface *intf)
spin_unlock_irq(&dev->txq.lock);
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
- /* handle remote wakeup ASAP */
- if (!dev->wait &&
- netif_device_present(dev->net) &&
+ /* handle remote wakeup ASAP
+ * we cannot race against stop
+ */
+ if (netif_device_present(dev->net) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_HALT, &dev->flags))
rx_alloc_submit(dev, GFP_NOIO);
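
The usbnet hunks replace an on-stack wait queue plus a dev->wait pointer with a wait_queue_head_t embedded in the device and initialized once at probe; the BH then uses waitqueue_active() as a cheap "anyone sleeping?" test before wake_up_all(). A hedged kernel-style sketch of that shape (struct demo is illustrative, and assumes the caller serializes updates to pending):

#include <linux/wait.h>

struct demo {
	wait_queue_head_t wait;
	int pending;
};

static void demo_init(struct demo *d)
{
	init_waitqueue_head(&d->wait);
	d->pending = 0;
}

/* Sleeper side: block until every outstanding item has completed. */
static void demo_wait_idle(struct demo *d)
{
	wait_event(d->wait, d->pending == 0);
}

/* Completion side: wake everyone only when the count hits zero. */
static void demo_complete_one(struct demo *d)
{
	if (--d->pending == 0 && waitqueue_active(&d->wait))
		wake_up_all(&d->wait);
}
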
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 32c45c3..4ecdf3c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -781,6 +781,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (err)
return err;
+ if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
+ return -EAFNOSUPPORT;
+
spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
port, vni, ifindex, ndm->ndm_flags);
@@ -1212,6 +1215,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
neigh_release(n);
+ if (reply == NULL)
+ goto out;
+
skb_reset_mac_header(reply);
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1233,15 +1239,103 @@ out:
}
#if IS_ENABLED(CONFIG_IPV6)
+
+static struct sk_buff *vxlan_na_create(struct sk_buff *request,
+ struct neighbour *n, bool isrouter)
+{
+ struct net_device *dev = request->dev;
+ struct sk_buff *reply;
+ struct nd_msg *ns, *na;
+ struct ipv6hdr *pip6;
+ u8 *daddr;
+ int na_olen = 8; /* opt hdr + ETH_ALEN for target */
+ int ns_olen;
+ int i, len;
+
+ if (dev == NULL)
+ return NULL;
+
+ len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
+ sizeof(*na) + na_olen + dev->needed_tailroom;
+ reply = alloc_skb(len, GFP_ATOMIC);
+ if (reply == NULL)
+ return NULL;
+
+ reply->protocol = htons(ETH_P_IPV6);
+ reply->dev = dev;
+ skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
+ skb_push(reply, sizeof(struct ethhdr));
+ skb_set_mac_header(reply, 0);
+
+ ns = (struct nd_msg *)skb_transport_header(request);
+
+ daddr = eth_hdr(request)->h_source;
+ ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
+ for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
+ if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
+ daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
+ break;
+ }
+ }
+
+ /* Ethernet header */
+ memcpy(eth_hdr(reply)->h_dest, daddr, ETH_ALEN);
+ memcpy(eth_hdr(reply)->h_source, n->ha, ETH_ALEN);
+ eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
+ reply->protocol = htons(ETH_P_IPV6);
+
+ skb_pull(reply, sizeof(struct ethhdr));
+ skb_set_network_header(reply, 0);
+ skb_put(reply, sizeof(struct ipv6hdr));
+
+ /* IPv6 header */
+
+ pip6 = ipv6_hdr(reply);
+ memset(pip6, 0, sizeof(struct ipv6hdr));
+ pip6->version = 6;
+ pip6->priority = ipv6_hdr(request)->priority;
+ pip6->nexthdr = IPPROTO_ICMPV6;
+ pip6->hop_limit = 255;
+ pip6->daddr = ipv6_hdr(request)->saddr;
+ pip6->saddr = *(struct in6_addr *)n->primary_key;
+
+ skb_pull(reply, sizeof(struct ipv6hdr));
+ skb_set_transport_header(reply, 0);
+
+ na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
+
+ /* Neighbor Advertisement */
+ memset(na, 0, sizeof(*na)+na_olen);
+ na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
+ na->icmph.icmp6_router = isrouter;
+ na->icmph.icmp6_override = 1;
+ na->icmph.icmp6_solicited = 1;
+ na->target = ns->target;
+ memcpy(&na->opt[2], n->ha, ETH_ALEN);
+ na->opt[0] = ND_OPT_TARGET_LL_ADDR;
+ na->opt[1] = na_olen >> 3;
+
+ na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
+ &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
+ csum_partial(na, sizeof(*na)+na_olen, 0));
+
+ pip6->payload_len = htons(sizeof(*na)+na_olen);
+
+ skb_push(reply, sizeof(struct ipv6hdr));
+
+ reply->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return reply;
+}
+
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct neighbour *n;
- union vxlan_addr ipa;
+ struct nd_msg *msg;
const struct ipv6hdr *iphdr;
const struct in6_addr *saddr, *daddr;
- struct nd_msg *msg;
- struct inet6_dev *in6_dev = NULL;
+ struct neighbour *n;
+ struct inet6_dev *in6_dev;
in6_dev = __in6_dev_get(dev);
if (!in6_dev)
@@ -1254,19 +1348,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
saddr = &iphdr->saddr;
daddr = &iphdr->daddr;
- if (ipv6_addr_loopback(daddr) ||
- ipv6_addr_is_multicast(daddr))
- goto out;
-
msg = (struct nd_msg *)skb_transport_header(skb);
if (msg->icmph.icmp6_code != 0 ||
msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
goto out;
- n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
+ if (ipv6_addr_loopback(daddr) ||
+ ipv6_addr_is_multicast(&msg->target))
+ goto out;
+
+ n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
if (n) {
struct vxlan_fdb *f;
+ struct sk_buff *reply;
if (!(n->nud_state & NUD_CONNECTED)) {
neigh_release(n);
@@ -1280,13 +1375,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
goto out;
}
- ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
- !!in6_dev->cnf.forwarding,
- true, false, false);
+ reply = vxlan_na_create(skb, n,
+ !!(f ? f->flags & NTF_ROUTER : 0));
+
neigh_release(n);
+
+ if (reply == NULL)
+ goto out;
+
+ if (netif_rx_ni(reply) == NET_RX_DROP)
+ dev->stats.rx_dropped++;
+
} else if (vxlan->flags & VXLAN_F_L3MISS) {
- ipa.sin6.sin6_addr = *daddr;
- ipa.sa.sa_family = AF_INET6;
+ union vxlan_addr ipa = {
+ .sin6.sin6_addr = msg->target,
+ .sa.sa_family = AF_INET6,
+ };
+
vxlan_ip_miss(dev, &ipa);
}
@@ -2383,9 +2488,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
vni = nla_get_u32(data[IFLA_VXLAN_ID]);
dst->remote_vni = vni;
+ /* Unless IPv6 is explicitly requested, assume IPv4 */
+ dst->remote_ip.sa.sa_family = AF_INET;
if (data[IFLA_VXLAN_GROUP]) {
dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
- dst->remote_ip.sa.sa_family = AF_INET;
} else if (data[IFLA_VXLAN_GROUP6]) {
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
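
vxlan_na_create() scans the solicitation's options for a source link-layer address; each ND option is a type byte, a length byte counted in 8-octet units, then payload. A stand-alone model of the walk (find_src_lladdr is illustrative, and unlike the kernel loop it also bails out on a zero-length option):

#include <stdint.h>
#include <stdio.h>

#define ND_OPT_SOURCE_LL_ADDR 1

static const uint8_t *find_src_lladdr(const uint8_t *opt, int len)
{
	int i;

	for (i = 0; i < len - 1; i += opt[i + 1] << 3) {
		if (opt[i + 1] == 0)
			return NULL;		/* malformed option */
		if (opt[i] == ND_OPT_SOURCE_LL_ADDR)
			return opt + i + 2;	/* skip type + length */
	}
	return NULL;
}

int main(void)
{
	/* One source lladdr option: type 1, length 1 (= 8 octets total). */
	const uint8_t opts[8] = { 1, 1, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t *ll = find_src_lladdr(opts, sizeof(opts));

	if (ll)
		printf("source lladdr starts %02x:%02x\n", ll[0], ll[1]);
	return 0;
}
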
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index edc5d10..03a56df 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -426,6 +426,12 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
bool blocked;
int err;
+ if (!wl->ucode.bcm43xx_bomminor) {
+ err = brcms_request_fw(wl, wl->wlc->hw->d11core);
+ if (err)
+ return -ENOENT;
+ }
+
ieee80211_wake_queues(hw);
spin_lock_bh(&wl->lock);
blocked = brcms_rfkill_set_hw_state(wl);
@@ -433,14 +439,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
if (!blocked)
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
- if (!wl->ucode.bcm43xx_bomminor) {
- err = brcms_request_fw(wl, wl->wlc->hw->d11core);
- if (err) {
- brcms_remove(wl->wlc->hw->d11core);
- return -ENOENT;
- }
- }
-
spin_lock_bh(&wl->lock);
/* avoid acknowledging frames before a non-monitor device is added */
wl->mute_tx = true;
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 0fad98b..eee2ef6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -596,8 +596,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mutex_lock(&mvm->mutex);
- /* Rssi update while not associated ?! */
- if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+ /*
+ * Rssi update while not associated - can happen since the statistics
+ * are handled asynchronously
+ */
+ if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
goto out_unlock;
/* No open connection - reports should be disabled */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 62aac3b..7bdaf06 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1394,20 +1394,6 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.set_bits_mask = iwl_trans_pcie_set_bits_mask,
};
-#ifdef CONFIG_PREEMPT_RT_BASE
-static irqreturn_t iwl_rt_irq_handler(int irq, void *dev_id)
-{
- irqreturn_t ret;
-
- local_bh_disable();
- ret = iwl_pcie_isr_ict(irq, dev_id);
- local_bh_enable();
- if (ret == IRQ_WAKE_THREAD)
- ret = iwl_pcie_irq_handler(irq, dev_id);
- return ret;
-}
-#endif
-
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct iwl_cfg *cfg)
@@ -1526,14 +1512,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
-#ifdef CONFIG_PREEMPT_RT_BASE
- err = request_threaded_irq(pdev->irq, NULL, iwl_rt_irq_handler,
- IRQF_SHARED | IRQF_ONESHOT, DRV_NAME, trans);
-#else
err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
iwl_pcie_irq_handler,
IRQF_SHARED, DRV_NAME, trans);
-#endif
if (err) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 0fbb1ce..bdfe637 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -714,7 +714,7 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
while (!ctx->done.done && msecs--)
udelay(1000);
} else {
- swait_event_interruptible(ctx->done.wait,
+ wait_event_interruptible(ctx->done.wait,
ctx->done.done);
}
break;
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f95de0d..1de59b0 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -587,7 +587,7 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
chan = priv->curchan;
if (chan) {
struct survey_info *survey = &priv->survey[chan->hw_value];
- survey->noise = clamp_t(s8, priv->noise, -128, 127);
+ survey->noise = clamp(priv->noise, -128, 127);
survey->channel_time = priv->survey_raw.active;
survey->channel_time_tx = priv->survey_raw.tx;
survey->channel_time_busy = priv->survey_raw.tx +
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 400fea1..a7501cb 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -102,6 +102,11 @@ struct xenvif {
domid_t domid;
unsigned int handle;
+ /* Is this interface disabled? True when the backend discovers
+ * the frontend is rogue.
+ */
+ bool disabled;
+
/* Use NAPI for guest TX */
struct napi_struct napi;
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 459935a..adfe460 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -66,6 +66,15 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
struct xenvif *vif = container_of(napi, struct xenvif, napi);
int work_done;
+ /* This vif is rogue; we pretend there is nothing to do
+ * for it so that it is descheduled from NAPI. The interface
+ * will be turned off in thread context later.
+ */
+ if (unlikely(vif->disabled)) {
+ napi_complete(napi);
+ return 0;
+ }
+
work_done = xenvif_tx_action(vif, budget);
if (work_done < budget) {
@@ -309,6 +318,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->csum = 1;
vif->dev = dev;
+ vif->disabled = false;
+
vif->credit_bytes = vif->remaining_credit = ~0UL;
vif->credit_usec = 0UL;
init_timer(&vif->credit_timeout);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6255850..a118653 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -206,8 +206,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* into multiple copies tend to give large frags their
* own buffers as before.
*/
- if ((offset + size > MAX_BUFFER_OFFSET) &&
- (size <= MAX_BUFFER_OFFSET) && offset && !head)
+ BUG_ON(size > MAX_BUFFER_OFFSET);
+ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
return true;
return false;
@@ -731,7 +731,8 @@ static void xenvif_tx_err(struct xenvif *vif,
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
- xenvif_carrier_off(vif);
+ vif->disabled = true;
+ xenvif_kick_thread(vif);
}
static int xenvif_count_requests(struct xenvif *vif,
@@ -1242,7 +1243,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
vif->tx.sring->req_prod, vif->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
xenvif_fatal_tx_err(vif);
- continue;
+ break;
}
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
@@ -1642,7 +1643,18 @@ int xenvif_kthread(void *data)
while (!kthread_should_stop()) {
wait_event_interruptible(vif->wq,
rx_work_todo(vif) ||
+ vif->disabled ||
kthread_should_stop());
+
+ /* This frontend has been found to be rogue; disable it in
+ * kthread context. Currently this is only set when netback
+ * finds that the frontend has sent a malformed packet, but
+ * we cannot disable the interface in softirq context, so
+ * we defer it here.
+ */
+ if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
+ xenvif_carrier_off(vif);
+
if (kthread_should_stop())
break;
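
The netback hunks split the fatal-error path across contexts: softirq code may only set vif->disabled and kick the kthread, and the kthread, which is allowed to sleep, performs the actual carrier-off. A hedged kernel-style sketch of that hand-off (vif_demo is illustrative):

#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/wait.h>

struct vif_demo {
	wait_queue_head_t wq;
	bool disabled;
	bool carrier_ok;
};

static void demo_init(struct vif_demo *v)
{
	init_waitqueue_head(&v->wq);
	v->disabled = false;
	v->carrier_ok = true;
}

/* Called from softirq (NAPI poll): flag and kick, nothing that sleeps. */
static void demo_fatal_err(struct vif_demo *v)
{
	v->disabled = true;
	wake_up(&v->wq);
}

/* Kthread body: sleepable context, safe to tear the interface down. */
static int demo_kthread(void *data)
{
	struct vif_demo *v = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(v->wq,
					 v->disabled || kthread_should_stop());
		if (v->disabled && v->carrier_ok)
			v->carrier_ok = false;	/* stands in for carrier off */
	}
	return 0;
}
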
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 8bb624e..0857ca9 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -465,7 +465,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
WARN_ON(!dev->block_cfg_access);
dev->block_cfg_access = 0;
- wake_up_all_locked(&pci_cfg_wait);
+ wake_up_all(&pci_cfg_wait);
raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 1953c16..8efd11d 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -866,11 +866,23 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
continue;
}
+ port->clk = of_clk_get_by_name(child, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
+ port->port, port->lane);
+ continue;
+ }
+
+ ret = clk_prepare_enable(port->clk);
+ if (ret)
+ continue;
+
port->base = mvebu_pcie_map_registers(pdev, child, port);
if (IS_ERR(port->base)) {
dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
port->port, port->lane);
port->base = NULL;
+ clk_disable_unprepare(port->clk);
continue;
}
@@ -886,22 +898,9 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
port->port, port->lane);
}
- port->clk = of_clk_get_by_name(child, NULL);
- if (IS_ERR(port->clk)) {
- dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
- port->port, port->lane);
- iounmap(port->base);
- port->haslink = 0;
- continue;
- }
-
port->dn = child;
-
- clk_prepare_enable(port->clk);
spin_lock_init(&port->conf_lock);
-
mvebu_sw_pci_bridge_init(port);
-
i++;
}
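
The reordering above makes the clock a hard prerequisite: it is fetched and enabled before the registers are mapped, and it is undone on every later failure path. A small plain-C sketch of that acquire-in-order, undo-on-failure shape, with stand-in helpers rather than the real clk/of APIs:

/* Sketch only: the helpers stand in for of_clk_get_by_name(),
 * clk_prepare_enable() and the register mapping. */
#include <stdio.h>

static int clk_get_and_enable(void)   { return 0; }
static void clk_disable_and_put(void) { puts("clock undone"); }
static int map_registers(void)        { return -1; } /* simulate failure */

static int probe_one_port(void)
{
        int ret = clk_get_and_enable();

        if (ret)
                return ret;             /* nothing acquired yet */

        ret = map_registers();
        if (ret) {
                clk_disable_and_put();  /* undo step 1 on this failure */
                return ret;
        }
        /* ... later steps must unwind both the mapping and the clock */
        return 0;
}

int main(void)
{
        printf("probe returned %d\n", probe_one_port());
        return 0;
}
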
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index c10e9ac..510994a 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -268,13 +268,13 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
PCIE_ATU_VIEWPORT);
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE);
dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
PCIE_ATU_LIMIT);
dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}
static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
@@ -283,7 +283,6 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
PCIE_ATU_VIEWPORT);
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
@@ -291,6 +290,7 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}
static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
@@ -299,7 +299,6 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
PCIE_ATU_VIEWPORT);
dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
@@ -307,6 +306,7 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
@@ -532,7 +532,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* setup RC BARs */
dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
- dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_1);
+ dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
/* setup interrupt pins */
dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
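
All four pcie-designware hunks apply one rule: fully program the ATU window (base, limit, target) and only then write PCIE_ATU_ENABLE, so the hardware never observes a half-configured translation. A sketch of that rule against an invented register layout:

/* Sketch only: the register layout is invented for illustration. */
#include <stdint.h>
#include <stdio.h>

struct atu_window {
        uint64_t base, limit, target;
        uint32_t ctrl;                  /* bit 0 = enable */
};

static void program_window(struct atu_window *w,
                           uint64_t base, uint64_t size, uint64_t target)
{
        w->ctrl &= ~1u;                 /* keep it off while we write */
        w->base = base;
        w->limit = base + size - 1;
        w->target = target;
        w->ctrl |= 1u;                  /* enable only after everything else */
}

int main(void)
{
        struct atu_window w = {0};

        program_window(&w, 0x80000000ull, 0x100000ull, 0x0ull);
        printf("window %#llx-%#llx -> %#llx enabled=%u\n",
               (unsigned long long)w.base, (unsigned long long)w.limit,
               (unsigned long long)w.target, w.ctrl & 1u);
        return 0;
}
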
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d97fbf4..ea83084 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1806,8 +1806,6 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
trace_regulator_disable_complete(rdev_get_name(rdev));
- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
- NULL);
return 0;
}
@@ -1831,6 +1829,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
rdev_err(rdev, "failed to disable\n");
return ret;
}
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ NULL);
}
rdev->use_count = 0;
@@ -1883,20 +1883,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
{
int ret = 0;
- /* force disable */
- if (rdev->desc->ops->disable) {
- /* ah well, who wants to live forever... */
- ret = rdev->desc->ops->disable(rdev);
- if (ret < 0) {
- rdev_err(rdev, "failed to force disable\n");
- return ret;
- }
- /* notify other consumers that power has been forced off */
- _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
- REGULATOR_EVENT_DISABLE, NULL);
+ ret = _regulator_do_disable(rdev);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to force disable\n");
+ return ret;
}
- return ret;
+ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
+ REGULATOR_EVENT_DISABLE, NULL);
+
+ return 0;
}
/**
@@ -3569,8 +3565,6 @@ int regulator_suspend_finish(void)
mutex_lock(&regulator_list_mutex);
list_for_each_entry(rdev, &regulator_list, list) {
- struct regulator_ops *ops = rdev->desc->ops;
-
mutex_lock(&rdev->mutex);
if (rdev->use_count > 0 || rdev->constraints->always_on) {
error = _regulator_do_enable(rdev);
@@ -3579,12 +3573,10 @@ int regulator_suspend_finish(void)
} else {
if (!has_full_constraints)
goto unlock;
- if (!ops->disable)
- goto unlock;
if (!_regulator_is_enabled(rdev))
goto unlock;
- error = ops->disable(rdev);
+ error = _regulator_do_disable(rdev);
if (error)
ret = error;
}
@@ -3774,7 +3766,7 @@ static int __init regulator_init_complete(void)
ops = rdev->desc->ops;
c = rdev->constraints;
- if (!ops->disable || (c && c->always_on))
+ if (c && c->always_on)
continue;
mutex_lock(&rdev->mutex);
@@ -3795,7 +3787,7 @@ static int __init regulator_init_complete(void)
/* We log since this may kill the system if it
* goes wrong. */
rdev_info(rdev, "disabling\n");
- ret = ops->disable(rdev);
+ ret = _regulator_do_disable(rdev);
if (ret != 0) {
rdev_err(rdev, "couldn't disable: %d\n", ret);
}
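
The regulator changes above centralize the actual disable in _regulator_do_disable() and leave each caller to raise its own notification on success. A compressed sketch of that shape, with illustrative event names rather than the regulator API:

/* Sketch only: do_disable() and notify() stand in for the shared
 * helper and the notifier chain; event values are invented. */
#include <stdio.h>

enum { EV_DISABLE = 1, EV_FORCE_DISABLE = 2 };

static int do_disable(void)    { return 0; }
static void notify(int events) { printf("notify 0x%x\n", events); }

static int regulator_disable_path(void)
{
        int ret = do_disable();

        if (ret < 0)
                return ret;
        notify(EV_DISABLE);             /* only after success */
        return 0;
}

static int regulator_force_disable_path(void)
{
        int ret = do_disable();

        if (ret < 0)
                return ret;
        notify(EV_FORCE_DISABLE | EV_DISABLE);
        return 0;
}

int main(void)
{
        return regulator_disable_path() || regulator_force_disable_path();
}
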
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 451bf99..846d5c6 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2978,12 +2978,12 @@ static int dasd_alloc_queue(struct dasd_block *block)
elevator_exit(block->request_queue->elevator);
block->request_queue->elevator = NULL;
+ mutex_lock(&block->request_queue->sysfs_lock);
rc = elevator_init(block->request_queue, "deadline");
- if (rc) {
+ if (rc)
blk_cleanup_queue(block->request_queue);
- return rc;
- }
- return 0;
+ mutex_unlock(&block->request_queue->sysfs_lock);
+ return rc;
}
/*
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index b439c20..07453bb 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1285,7 +1285,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
struct sk_buff *skb;
#ifdef CONFIG_SMP
struct fcoe_percpu_s *p0;
- unsigned targ_cpu = get_cpu_light();
+ unsigned targ_cpu = get_cpu();
#endif /* CONFIG_SMP */
FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
@@ -1341,7 +1341,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
kfree_skb(skb);
spin_unlock_bh(&p->fcoe_rx_list.lock);
}
- put_cpu_light();
+ put_cpu();
#else
/*
* This a non-SMP scenario where the singular Rx thread is
@@ -1559,11 +1559,11 @@ err2:
static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
- int rc, cpu = get_cpu_light();
+ int rc;
- fps = &per_cpu(fcoe_percpu, cpu);
+ fps = &get_cpu_var(fcoe_percpu);
rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
- put_cpu_light();
+ put_cpu_var(fcoe_percpu);
return rc;
}
@@ -1761,11 +1761,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
return 0;
}
- stats = per_cpu_ptr(lport->stats, get_cpu_light());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
stats->InvalidCRCCount++;
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
- put_cpu_light();
+ put_cpu();
return -EINVAL;
}
@@ -1841,13 +1841,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
goto drop;
if (!fcoe_filter_frames(lport, fp)) {
- put_cpu_light();
+ put_cpu();
fc_exch_recv(lport, fp);
return;
}
drop:
stats->ErrorFrames++;
- put_cpu_light();
+ put_cpu();
kfree_skb(skb);
}
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 48769c3..203415e 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -792,7 +792,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
INIT_LIST_HEAD(&del_list);
- stats = per_cpu_ptr(fip->lp->stats, get_cpu_light());
+ stats = per_cpu_ptr(fip->lp->stats, get_cpu());
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
@@ -828,7 +828,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
sel_time = fcf->time;
}
}
- put_cpu_light();
+ put_cpu();
list_for_each_entry_safe(fcf, next, &del_list, list) {
/* Removes fcf from current list */
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 74c7eb0..5879929 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -741,10 +741,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
}
memset(ep, 0, sizeof(*ep));
- cpu = get_cpu_light();
+ cpu = get_cpu();
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
- put_cpu_light();
+ put_cpu();
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index f8213e1..957088b 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -58,12 +58,12 @@ qla2x00_poll(struct rsp_que *rsp)
{
unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
if (IS_P3P_TYPE(ha))
qla82xx_poll(0, rsp);
else
ha->isp_ops->intr_handler(0, rsp);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
}
static inline uint8_t *
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index c55f234..26321f9 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -56,6 +56,7 @@ Configuration Options: not applicable, uses PCI auto config
#include "../comedidev.h"
#include "8255.h"
+#include "mite.h"
enum pci_8255_boardid {
BOARD_ADLINK_PCI7224,
@@ -79,6 +80,7 @@ struct pci_8255_boardinfo {
const char *name;
int dio_badr;
int n_8255;
+ unsigned int has_mite:1;
};
static const struct pci_8255_boardinfo pci_8255_boards[] = {
@@ -126,36 +128,43 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
.name = "ni_pci-dio-96",
.dio_badr = 1,
.n_8255 = 4,
+ .has_mite = 1,
},
[BOARD_NI_PCIDIO96B] = {
.name = "ni_pci-dio-96b",
.dio_badr = 1,
.n_8255 = 4,
+ .has_mite = 1,
},
[BOARD_NI_PXI6508] = {
.name = "ni_pxi-6508",
.dio_badr = 1,
.n_8255 = 4,
+ .has_mite = 1,
},
[BOARD_NI_PCI6503] = {
.name = "ni_pci-6503",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
[BOARD_NI_PCI6503B] = {
.name = "ni_pci-6503b",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
[BOARD_NI_PCI6503X] = {
.name = "ni_pci-6503x",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
[BOARD_NI_PXI_6503] = {
.name = "ni_pxi-6503",
.dio_badr = 1,
.n_8255 = 1,
+ .has_mite = 1,
},
};
@@ -163,6 +172,25 @@ struct pci_8255_private {
void __iomem *mmio_base;
};
+static int pci_8255_mite_init(struct pci_dev *pcidev)
+{
+ void __iomem *mite_base;
+ u32 main_phys_addr;
+
+ /* ioremap the MITE registers (BAR 0) temporarily */
+ mite_base = pci_ioremap_bar(pcidev, 0);
+ if (!mite_base)
+ return -ENOMEM;
+
+ /* set data window to main registers (BAR 1) */
+ main_phys_addr = pci_resource_start(pcidev, 1);
+ writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
+
+ /* finished with MITE registers */
+ iounmap(mite_base);
+ return 0;
+}
+
static int pci_8255_mmio(int dir, int port, int data, unsigned long iobase)
{
void __iomem *mmio_base = (void __iomem *)iobase;
@@ -201,6 +229,12 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
+ if (board->has_mite) {
+ ret = pci_8255_mite_init(pcidev);
+ if (ret)
+ return ret;
+ }
+
is_mmio = (pci_resource_flags(pcidev, board->dio_badr) &
IORESOURCE_MEM) != 0;
if (is_mmio) {
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 8fd72ff..d917a34 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -177,9 +177,6 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
": %d chars not inserted to flip buffer!\n",
length - work);
- /*
- * This may sleep if ->low_latency is set
- */
if (work)
tty_flip_buffer_push(&tty->port);
}
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index f70ecf1..4f6e01c 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -38,7 +38,6 @@
#include <linux/nmi.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/kdb.h>
#ifdef CONFIG_SPARC
#include <linux/sunserialcore.h>
#endif
@@ -81,16 +80,7 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
#define DEBUG_INTR(fmt...) do { } while (0)
#endif
-/*
- * On -rt we can have a more delays, and legitimately
- * so - so don't drop work spuriously and spam the
- * syslog:
- */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define PASS_LIMIT 1000000
-#else
-# define PASS_LIMIT 512
-#endif
+#define PASS_LIMIT 512
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
@@ -2874,10 +2864,14 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
touch_nmi_watchdog();
- if (port->sysrq || oops_in_progress || in_kdb_printk())
- locked = spin_trylock_irqsave(&port->lock, flags);
- else
- spin_lock_irqsave(&port->lock, flags);
+ local_irq_save(flags);
+ if (port->sysrq) {
+ /* serial8250_handle_irq() already took the lock */
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&port->lock);
+ } else
+ spin_lock(&port->lock);
/*
* First save the IER then disable the interrupts
@@ -2909,7 +2903,8 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
serial8250_modem_status(up);
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ spin_unlock(&port->lock);
+ local_irq_restore(flags);
}
static int __init serial8250_console_setup(struct console *co, char *options)
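
This locking shape, save the local irq state, skip locking when the sysrq path already holds the port lock, and only trylock while an oops is in progress, recurs in the amba-pl011, omap-serial, sunhv, sunsab, sunsu and sunzilog hunks that follow. A userspace model with pthread_mutex_trylock standing in for spin_trylock() and the irq save/restore elided:

/* Sketch only: a mutex models the port spinlock; interrupts have no
 * userspace analogue and are noted in comments instead. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

static void console_write(const char *s, bool sysrq, bool oops)
{
        int locked = 1;

        /* local_irq_save(flags) would go here in the kernel */
        if (sysrq)
                locked = 0;             /* IRQ path already holds the lock */
        else if (oops)
                locked = (pthread_mutex_trylock(&port_lock) == 0);
        else
                pthread_mutex_lock(&port_lock);

        fputs(s, stdout);               /* emit the characters */

        if (locked)
                pthread_mutex_unlock(&port_lock);
        /* local_irq_restore(flags) would go here in the kernel */
}

int main(void)
{
        console_write("normal path\n", false, false);
        console_write("oops path\n", false, true);
        return 0;
}
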
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 729198c..1440d0b 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1916,19 +1916,13 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
clk_enable(uap->clk);
- /*
- * local_irq_save(flags);
- *
- * This local_irq_save() is nonsense. If we come in via sysrq
- * handling then interrupts are already disabled. Aside of
- * that the port.sysrq check is racy on SMP regardless.
- */
+ local_irq_save(flags);
if (uap->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&uap->port.lock, flags);
+ locked = spin_trylock(&uap->port.lock);
else
- spin_lock_irqsave(&uap->port.lock, flags);
+ spin_lock(&uap->port.lock);
/*
* First save the CR then disable the interrupts
@@ -1950,7 +1944,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
writew(old_cr, uap->port.membase + UART011_CR);
if (locked)
- spin_unlock_irqrestore(&uap->port.lock, flags);
+ spin_unlock(&uap->port.lock);
+ local_irq_restore(flags);
clk_disable(uap->clk);
}
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 22c8149..816d1a2 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1232,10 +1232,13 @@ serial_omap_console_write(struct console *co, const char *s,
pm_runtime_get_sync(up->dev);
- if (up->port.sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
+ local_irq_save(flags);
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock(&up->port.lock);
else
- spin_lock_irqsave(&up->port.lock, flags);
+ spin_lock(&up->port.lock);
/*
* First save the IER then disable the interrupts
@@ -1264,7 +1267,8 @@ serial_omap_console_write(struct console *co, const char *s,
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
}
static int __init
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index dc697ce..cf86e72 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -433,10 +433,13 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
unsigned long flags;
int locked = 1;
- if (port->sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
- else
- spin_lock_irqsave(&port->lock, flags);
+ local_irq_save(flags);
+ if (port->sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&port->lock);
+ } else
+ spin_lock(&port->lock);
while (n > 0) {
unsigned long ra = __pa(con_write_page);
@@ -467,7 +470,8 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
}
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ spin_unlock(&port->lock);
+ local_irq_restore(flags);
}
static inline void sunhv_console_putchar(struct uart_port *port, char c)
@@ -488,10 +492,7 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
unsigned long flags;
int i, locked = 1;
- if (port->sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
- else
- spin_lock_irqsave(&port->lock, flags);
+ local_irq_save(flags);
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
@@ -506,7 +507,8 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
}
if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ spin_unlock(&port->lock);
+ local_irq_restore(flags);
}
static struct console sunhv_console = {
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 1bf2774..5d6136b 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -844,16 +844,20 @@ static void sunsab_console_write(struct console *con, const char *s, unsigned n)
unsigned long flags;
int locked = 1;
- if (up->port.sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
- else
- spin_lock_irqsave(&up->port.lock, flags);
+ local_irq_save(flags);
+ if (up->port.sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
uart_console_write(&up->port, s, n, sunsab_console_putchar);
sunsab_tec_wait(up);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
}
static int sunsab_console_setup(struct console *con, char *options)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index d88fb63..699cc1b 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1295,10 +1295,13 @@ static void sunsu_console_write(struct console *co, const char *s,
unsigned int ier;
int locked = 1;
- if (up->port.sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
- else
- spin_lock_irqsave(&up->port.lock, flags);
+ local_irq_save(flags);
+ if (up->port.sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
/*
* First save the UER then disable the interrupts
@@ -1316,7 +1319,8 @@ static void sunsu_console_write(struct console *co, const char *s,
serial_out(up, UART_IER, ier);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
}
/*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 3103c3b..135a152 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1195,16 +1195,20 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count)
unsigned long flags;
int locked = 1;
- if (up->port.sysrq || oops_in_progress)
- locked = spin_trylock_irqsave(&up->port.lock, flags);
- else
- spin_lock_irqsave(&up->port.lock, flags);
+ local_irq_save(flags);
+ if (up->port.sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
uart_console_write(&up->port, s, count, sunzilog_putchar);
udelay(2);
if (locked)
- spin_unlock_irqrestore(&up->port.lock, flags);
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
}
static int __init sunzilog_console_setup(struct console *con, char *options)
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 8a3b2a0..2b52d80 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -332,14 +332,11 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags);
* Takes any pending buffers and transfers their ownership to the
* ldisc side of the queue. It then schedules those characters for
* processing by the line discipline.
- * Note that this function can only be used when the low_latency flag
- * is unset. Otherwise the workqueue won't be flushed.
*/
void tty_schedule_flip(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- WARN_ON(port->low_latency);
buf->tail->commit = buf->tail->used;
schedule_work(&buf->work);
@@ -487,17 +484,15 @@ static void flush_to_ldisc(struct work_struct *work)
*/
void tty_flush_to_ldisc(struct tty_struct *tty)
{
- if (!tty->port->low_latency)
- flush_work(&tty->port->buf.work);
+ flush_work(&tty->port->buf.work);
}
/**
* tty_flip_buffer_push - terminal
* @port: tty port to push
*
- * Queue a push of the terminal flip buffers to the line discipline. This
- * function must not be called from IRQ context if port->low_latency is
- * set.
+ * Queue a push of the terminal flip buffers to the line discipline.
+ * Can be called from IRQ/atomic context.
*
* In the event of the queue being busy for flipping the work will be
* held off and retried later.
@@ -505,18 +500,7 @@ void tty_flush_to_ldisc(struct tty_struct *tty)
void tty_flip_buffer_push(struct tty_port *port)
{
- struct tty_bufhead *buf = &port->buf;
-
- buf->tail->commit = buf->tail->used;
-
-#ifndef CONFIG_PREEMPT_RT_FULL
- if (port->low_latency)
- flush_to_ldisc(&buf->work);
- else
- schedule_work(&buf->work);
-#else
- schedule_work(&buf->work);
-#endif
+ tty_schedule_flip(port);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
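
After this hunk tty_flip_buffer_push() is just tty_schedule_flip(): commit the tail and schedule the work, with nothing that can sleep, which is what makes it callable from IRQ/atomic context. A toy model of that commit-then-schedule step, with invented types:

/* Sketch only: flip_buf and schedule_work() are stand-ins for the
 * tty buffer head and the workqueue. */
#include <stdio.h>

struct flip_buf { int used, commit; };

static void schedule_work(void) { puts("work scheduled"); }

static void flip_buffer_push(struct flip_buf *b)
{
        b->commit = b->used;    /* publish bytes up to 'used' */
        schedule_work();        /* consumer runs later, in process context */
}

int main(void)
{
        struct flip_buf b = { .used = 42, .commit = 0 };

        flip_buffer_push(&b);
        printf("committed %d bytes\n", b.commit);
        return 0;
}
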
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index c74a00a..d3448a9 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1271,12 +1271,13 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
*
* Locking: None
*/
-static void tty_line_name(struct tty_driver *driver, int index, char *p)
+static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
{
if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
- strcpy(p, driver->name);
+ return sprintf(p, "%s", driver->name);
else
- sprintf(p, "%s%d", driver->name, index + driver->name_base);
+ return sprintf(p, "%s%d", driver->name,
+ index + driver->name_base);
}
/**
@@ -3545,9 +3546,19 @@ static ssize_t show_cons_active(struct device *dev,
if (i >= ARRAY_SIZE(cs))
break;
}
- while (i--)
- count += sprintf(buf + count, "%s%d%c",
- cs[i]->name, cs[i]->index, i ? ' ':'\n');
+ while (i--) {
+ int index = cs[i]->index;
+ struct tty_driver *drv = cs[i]->device(cs[i], &index);
+
+ /* don't resolve tty0 as some programs depend on it */
+ if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR))
+ count += tty_line_name(drv, index, buf + count);
+ else
+ count += sprintf(buf + count, "%s%d",
+ cs[i]->name, cs[i]->index);
+
+ count += sprintf(buf + count, "%c", i ? ' ':'\n');
+ }
console_unlock();
return count;
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index 5651231..f3eecd9 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -34,6 +34,7 @@
#include <linux/stringify.h>
#include <linux/usb.h>
#include <linux/mutex.h>
+#include <linux/ratelimit.h>
/*
#define VERBOSE_DEBUG
@@ -59,13 +60,12 @@
atm_printk(KERN_INFO, instance , format , ## arg)
#define atm_warn(instance, format, arg...) \
atm_printk(KERN_WARNING, instance , format , ## arg)
-#define atm_dbg(instance, format, arg...) \
- dynamic_pr_debug("ATM dev %d: " format , \
- (instance)->atm_dev->number , ## arg)
-#define atm_rldbg(instance, format, arg...) \
- if (printk_ratelimit()) \
- atm_dbg(instance , format , ## arg)
-
+#define atm_dbg(instance, format, ...) \
+ pr_debug("ATM dev %d: " format, \
+ (instance)->atm_dev->number, ##__VA_ARGS__)
+#define atm_rldbg(instance, format, ...) \
+ pr_debug_ratelimited("ATM dev %d: " format, \
+ (instance)->atm_dev->number, ##__VA_ARGS__)
/* flags, set by mini-driver in bind() */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index bf21a21..d6a8d23 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1677,9 +1677,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
* and no one may trigger the above deadlock situation when
* running complete() in tasklet.
*/
- local_irq_save_nort(flags);
+ local_irq_save(flags);
urb->complete(urb);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
atomic_dec(&urb->use_count);
if (unlikely(atomic_read(&urb->reject)))
@@ -2331,7 +2331,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
* when the first handler doesn't use it. So let's just
* assume it's never used.
*/
- local_irq_save_nort(flags);
+ local_irq_save(flags);
if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
rc = IRQ_NONE;
@@ -2340,7 +2340,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
else
rc = IRQ_HANDLED;
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return rc;
}
EXPORT_SYMBOL_GPL(usb_hcd_irq);
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index fd2fe19..44cf775 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1282,7 +1282,7 @@ static void ffs_data_put(struct ffs_data *ffs)
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
- swaitqueue_active(&ffs->ep0req_completion.wait));
+ waitqueue_active(&ffs->ep0req_completion.wait));
kfree(ffs->dev_name);
kfree(ffs);
}
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 1033ecc..b94c049 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -340,7 +340,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
spin_unlock_irq (&epdata->dev->lock);
if (likely (value == 0)) {
- value = swait_event_interruptible (done.wait, done.done);
+ value = wait_event_interruptible (done.wait, done.done);
if (value != 0) {
spin_lock_irq (&epdata->dev->lock);
if (likely (epdata->ep != NULL)) {
@@ -349,7 +349,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
usb_ep_dequeue (epdata->ep, epdata->req);
spin_unlock_irq (&epdata->dev->lock);
- swait_event (done.wait, done.done);
+ wait_event (done.wait, done.done);
if (epdata->status == -ECONNRESET)
epdata->status = -EINTR;
} else {
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index b369292..ad0aca8 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -549,8 +549,8 @@ static void gs_rx_push(unsigned long _port)
port->read_started--;
}
- /* Push from tty to ldisc; without low_latency set this is handled by
- * a workqueue, so we won't get callbacks and can hold port_lock
+ /* Push from tty to ldisc; this is handled by a workqueue,
+ * so we won't get callbacks and can hold port_lock
*/
if (do_push)
tty_flip_buffer_push(&port->port);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index b691278..604cad1 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -874,13 +874,9 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
}
if (ints & OHCI_INTR_WDH) {
- if (ohci->hcca->done_head == 0) {
- ints &= ~OHCI_INTR_WDH;
- } else {
- spin_lock (&ohci->lock);
- dl_done_list (ohci);
- spin_unlock (&ohci->lock);
- }
+ spin_lock (&ohci->lock);
+ dl_done_list (ohci);
+ spin_unlock (&ohci->lock);
}
if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) {
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 73f5208..1af67a2 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"QUIRK: Resetting on resume");
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0015 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
+ pdev->subsystem_device == 0xc0cd)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 4fb7a8f..54af4e9 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -186,12 +186,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn)
if (pfn_valid(pfn)) {
bool reserved;
struct page *tail = pfn_to_page(pfn);
- struct page *head = compound_trans_head(tail);
+ struct page *head = compound_head(tail);
reserved = !!(PageReserved(head));
if (head != tail) {
/*
* "head" is not a dangling pointer
- * (compound_trans_head takes care of that)
+ * (compound_head takes care of that)
* but the hugepage may have been split
* from under us (and we may not hold a
* reference count on the head page so it can
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index b12176f..5264d83 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -501,9 +501,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
r = -ENOBUFS;
goto err;
}
- d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+ r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num);
+ if (unlikely(r < 0))
+ goto err;
+
+ d = r;
if (d == vq->num) {
r = 0;
goto err;
@@ -528,6 +532,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
+
+ /* Detect overrun */
+ if (unlikely(datalen > 0)) {
+ r = UIO_MAXIOV + 1;
+ goto err;
+ }
return headcount;
err:
vhost_discard_vq_desc(vq, headcount);
@@ -583,6 +593,14 @@ static void handle_rx(struct vhost_net *net)
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
break;
+ /* On overrun, truncate and discard */
+ if (unlikely(headcount > UIO_MAXIOV)) {
+ msg.msg_iovlen = 1;
+ err = sock->ops->recvmsg(NULL, sock, &msg,
+ 1, MSG_DONTWAIT | MSG_TRUNC);
+ pr_debug("Discarded rx packet: len %zd\n", sock_len);
+ continue;
+ }
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
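
The vhost-net hunks above introduce a sentinel convention: get_rx_bufs() returns a negative value for hard errors, a normal headcount on success, and UIO_MAXIOV + 1 to mean the datagram did not fit and should be truncated and discarded. A sketch of that three-way return, with an invented MAXBUFS in place of UIO_MAXIOV:

/* Sketch only: MAXBUFS and get_bufs() are illustrative. */
#include <stdio.h>

#define MAXBUFS 8

static int get_bufs(int needed)
{
        if (needed < 0)
                return -1;              /* hard error */
        if (needed > MAXBUFS)
                return MAXBUFS + 1;     /* overrun sentinel */
        return needed;                  /* usable headcount */
}

int main(void)
{
        int n = get_bufs(12);

        if (n < 0)
                return 1;               /* stop until the next kick */
        if (n > MAXBUFS) {
                puts("overrun: truncate and discard the packet");
                return 0;
        }
        printf("got %d buffers\n", n);
        return 0;
}
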
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index dacaf74..b6d5008 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1108,14 +1108,16 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOPUT_VSCREENINFO:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
- if (!lock_fb_info(info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(info)) {
+ console_unlock();
+ return -ENODEV;
+ }
info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_set_var(info, &var);
info->flags &= ~FBINFO_MISC_USEREVENT;
- console_unlock();
unlock_fb_info(info);
+ console_unlock();
if (!ret && copy_to_user(argp, &var, sizeof(var)))
ret = -EFAULT;
break;
@@ -1144,12 +1146,14 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOPAN_DISPLAY:
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
- if (!lock_fb_info(info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(info)) {
+ console_unlock();
+ return -ENODEV;
+ }
ret = fb_pan_display(info, &var);
- console_unlock();
unlock_fb_info(info);
+ console_unlock();
if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
return -EFAULT;
break;
@@ -1184,23 +1188,27 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
break;
}
event.data = &con2fb;
- if (!lock_fb_info(info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(info)) {
+ console_unlock();
+ return -ENODEV;
+ }
event.info = info;
ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
- console_unlock();
unlock_fb_info(info);
+ console_unlock();
break;
case FBIOBLANK:
- if (!lock_fb_info(info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(info)) {
+ console_unlock();
+ return -ENODEV;
+ }
info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_blank(info, arg);
info->flags &= ~FBINFO_MISC_USEREVENT;
- console_unlock();
unlock_fb_info(info);
+ console_unlock();
break;
default:
if (!lock_fb_info(info))
@@ -1569,10 +1577,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena,
static int do_unregister_framebuffer(struct fb_info *fb_info);
#define VGA_FB_PHYS 0xA0000
-static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
+static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
{
- int i;
+ int i, ret;
/* check all firmware fbs and kick off if the base addr overlaps */
for (i = 0 ; i < FB_MAX; i++) {
@@ -1588,25 +1596,31 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
(primary && gen_aper && gen_aper->count &&
gen_aper->ranges[0].base == VGA_FB_PHYS)) {
- printk(KERN_INFO "fb: conflicting fb hw usage "
- "%s vs %s - removing generic driver\n",
+ printk(KERN_INFO "fb: switching to %s from %s\n",
name, registered_fb[i]->fix.id);
- do_unregister_framebuffer(registered_fb[i]);
+ ret = do_unregister_framebuffer(registered_fb[i]);
+ if (ret)
+ return ret;
}
}
+
+ return 0;
}
static int do_register_framebuffer(struct fb_info *fb_info)
{
- int i;
+ int i, ret;
struct fb_event event;
struct fb_videomode mode;
if (fb_check_foreignness(fb_info))
return -ENOSYS;
- do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
- fb_is_primary_device(fb_info));
+ ret = do_remove_conflicting_framebuffers(fb_info->apertures,
+ fb_info->fix.id,
+ fb_is_primary_device(fb_info));
+ if (ret)
+ return ret;
if (num_registered_fb == FB_MAX)
return -ENXIO;
@@ -1660,12 +1674,15 @@ static int do_register_framebuffer(struct fb_info *fb_info)
registered_fb[i] = fb_info;
event.info = fb_info;
- if (!lock_fb_info(fb_info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(fb_info)) {
+ console_unlock();
+ return -ENODEV;
+ }
+
fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
- console_unlock();
unlock_fb_info(fb_info);
+ console_unlock();
return 0;
}
@@ -1678,13 +1695,16 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
return -EINVAL;
- if (!lock_fb_info(fb_info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(fb_info)) {
+ console_unlock();
+ return -ENODEV;
+ }
+
event.info = fb_info;
ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
- console_unlock();
unlock_fb_info(fb_info);
+ console_unlock();
if (ret)
return -EINVAL;
@@ -1725,12 +1745,16 @@ int unlink_framebuffer(struct fb_info *fb_info)
}
EXPORT_SYMBOL(unlink_framebuffer);
-void remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
+int remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
{
+ int ret;
+
mutex_lock(&registration_lock);
- do_remove_conflicting_framebuffers(a, name, primary);
+ ret = do_remove_conflicting_framebuffers(a, name, primary);
mutex_unlock(&registration_lock);
+
+ return ret;
}
EXPORT_SYMBOL(remove_conflicting_framebuffers);
@@ -1916,6 +1940,9 @@ int fb_get_options(const char *name, char **option)
options = opt + name_len + 1;
}
}
+ /* No match, pass global option */
+ if (!options && option && fb_mode_option)
+ options = kstrdup(fb_mode_option, GFP_KERNEL);
if (options && !strncmp(options, "off", 3))
retval = 1;
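
Every fbmem hunk above enforces the same ordering: console_lock() is taken before lock_fb_info(), the two are released in reverse order, and a failed inner acquisition must drop the outer lock before returning. A userspace model of that discipline using two pthread mutexes named after the kernel locks:

/* Sketch only: the mutexes model the kernel locks; lock_fb_info()
 * here uses trylock merely to model a helper that can fail. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fb_info_lock = PTHREAD_MUTEX_INITIALIZER;

static bool lock_fb_info(void)
{
        return pthread_mutex_trylock(&fb_info_lock) == 0;
}

static int fb_ioctl_like_path(void)
{
        pthread_mutex_lock(&console_lock);      /* outer lock first */
        if (!lock_fb_info()) {
                pthread_mutex_unlock(&console_lock);    /* undo outer */
                return -1;                      /* -ENODEV in the kernel */
        }
        puts("both locks held, do the work");
        pthread_mutex_unlock(&fb_info_lock);    /* inner released first */
        pthread_mutex_unlock(&console_lock);
        return 0;
}

int main(void)
{
        return fb_ioctl_like_path() ? 1 : 0;
}
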
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index ef476b0..53444ac 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -177,9 +177,12 @@ static ssize_t store_modes(struct device *device,
if (i * sizeof(struct fb_videomode) != count)
return -EINVAL;
- if (!lock_fb_info(fb_info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(fb_info)) {
+ console_unlock();
+ return -ENODEV;
+ }
+
list_splice(&fb_info->modelist, &old_list);
fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
&fb_info->modelist);
@@ -189,8 +192,8 @@ static ssize_t store_modes(struct device *device,
} else
fb_destroy_modelist(&old_list);
- console_unlock();
unlock_fb_info(fb_info);
+ console_unlock();
return 0;
}
@@ -404,12 +407,16 @@ static ssize_t store_fbstate(struct device *device,
state = simple_strtoul(buf, &last, 0);
- if (!lock_fb_info(fb_info))
- return -ENODEV;
console_lock();
+ if (!lock_fb_info(fb_info)) {
+ console_unlock();
+ return -ENODEV;
+ }
+
fb_set_suspend(fb_info, (int)state);
- console_unlock();
+
unlock_fb_info(fb_info);
+ console_unlock();
return count;
}
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 0264704..45d0312 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -574,8 +574,9 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
switch (event) {
case SH_MOBILE_LCDC_EVENT_DISPLAY_CONNECT:
/* HDMI plug in */
+ console_lock();
if (lock_fb_info(info)) {
- console_lock();
+
ch->display.width = monspec->max_x * 10;
ch->display.height = monspec->max_y * 10;
@@ -594,19 +595,20 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
fb_set_suspend(info, 0);
}
- console_unlock();
+
unlock_fb_info(info);
}
+ console_unlock();
break;
case SH_MOBILE_LCDC_EVENT_DISPLAY_DISCONNECT:
/* HDMI disconnect */
+ console_lock();
if (lock_fb_info(info)) {
- console_lock();
fb_set_suspend(info, 1);
- console_unlock();
unlock_fb_info(info);
}
+ console_unlock();
break;
case SH_MOBILE_LCDC_EVENT_DISPLAY_MODE:
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1f572c0..cfda0a6 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -312,6 +312,12 @@ static int balloon(void *_vballoon)
else if (diff < 0)
leak_balloon(vb, -diff);
update_balloon_size(vb);
+
+ /*
+ * For large balloon changes, we could spend a lot of time
+ * and always have work to do. Be nice and reschedule so that
+ * other tasks can make progress.
+ */
+ cond_resched();
}
return 0;
}
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b232908..4c437ef 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -405,11 +405,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
state = BP_EAGAIN;
break;
}
+ scrub_page(page);
- pfn = page_to_pfn(page);
- frame_list[i] = pfn_to_mfn(pfn);
+ frame_list[i] = page_to_pfn(page);
+ }
- scrub_page(page);
+ /*
+ * Ensure that ballooned highmem pages don't have kmaps.
+ *
+ * Do this before changing the p2m as kmap_flush_unused()
+ * reads PTEs to obtain pages (and hence needs the original
+ * p2m entry).
+ */
+ kmap_flush_unused();
+
+ /* Update direct mapping, invalidate P2M, and add to balloon. */
+ for (i = 0; i < nr_pages; i++) {
+ pfn = frame_list[i];
+ frame_list[i] = pfn_to_mfn(pfn);
+ page = pfn_to_page(pfn);
/*
* Ballooned out frames are effectively replaced with
@@ -433,11 +447,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
}
put_balloon_scratch_page();
- balloon_append(pfn_to_page(pfn));
+ balloon_append(page);
}
- /* Ensure that ballooned highmem pages don't have kmaps. */
- kmap_flush_unused();
flush_tlb_all();
set_xen_guest_handle(reservation.extent_start, frame_list);
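
The balloon hunk above splits one loop into two phases with a single kmap_flush_unused() between them: first collect and scrub every page, then translate and update the mappings. A stripped-down sketch of the two-phase structure, with stand-ins for the Xen helpers:

/* Sketch only: scrub(), global_flush() and pfn_to_mfn() are
 * stand-ins for the Xen balloon helpers. */
#include <stdio.h>

#define NR 4

static void scrub(int pfn)      { (void)pfn; }
static void global_flush(void)  { puts("kmap_flush_unused() once"); }
static int pfn_to_mfn(int pfn)  { return pfn + 1000; }

int main(void)
{
        int frames[NR];
        int i;

        for (i = 0; i < NR; i++) {      /* phase 1: collect + scrub */
                scrub(i);
                frames[i] = i;
        }

        global_flush();                 /* exactly once, between phases */

        for (i = 0; i < NR; i++)        /* phase 2: translate + update */
                frames[i] = pfn_to_mfn(frames[i]);

        printf("frame[0] = %d\n", frames[0]);
        return 0;
}
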
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index c2ed9c4..3f1128b 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -34,7 +34,6 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
-#include <linux/delay.h>
#include <asm/current.h>
#include <asm/uaccess.h>
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index b422ad6..3d9d3f5 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -157,7 +157,7 @@ again:
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
- cpu_chill();
+ cpu_relax();
goto relock;
}
spin_unlock(&p->d_lock);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 62176ad..84d590a 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3246,6 +3246,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
/* send down all the barriers */
head = &info->fs_devices->devices;
list_for_each_entry_rcu(dev, head, dev_list) {
+ if (dev->missing)
+ continue;
if (!dev->bdev) {
errors_send++;
continue;
@@ -3260,6 +3262,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
/* wait for all the barriers */
list_for_each_entry_rcu(dev, head, dev_list) {
+ if (dev->missing)
+ continue;
if (!dev->bdev) {
errors_wait++;
continue;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index b791cfb..25d64e8 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -663,7 +663,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
int lock = (trans->type != TRANS_JOIN_NOLOCK);
int err = 0;
- if (--trans->use_count) {
+ if (trans->use_count > 1) {
+ trans->use_count--;
trans->block_rsv = trans->orig_rsv;
return 0;
}
@@ -711,17 +712,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
}
if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
- if (throttle) {
- /*
- * We may race with somebody else here so end up having
- * to call end_transaction on ourselves again, so inc
- * our use_count.
- */
- trans->use_count++;
+ if (throttle)
return btrfs_commit_transaction(trans, root);
- } else {
+ else
wake_up_process(info->transaction_kthread);
- }
}
if (trans->type & __TRANS_FREEZABLE)
diff --git a/fs/buffer.c b/fs/buffer.c
index e0fa2a4..aeeea65 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -322,7 +322,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
* decide that the page is now completely done.
*/
first = page_buffers(page);
- flags = bh_uptodate_lock_irqsave(first);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -335,7 +336,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bh_uptodate_unlock_irqrestore(first, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
/*
* If none of the buffers had errors and they are all
@@ -347,7 +349,9 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
return;
still_busy:
- bh_uptodate_unlock_irqrestore(first, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
+ return;
}
/*
@@ -381,7 +385,8 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
first = page_buffers(page);
- flags = bh_uptodate_lock_irqsave(first);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
clear_buffer_async_write(bh);
unlock_buffer(bh);
@@ -393,12 +398,15 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
}
- bh_uptodate_unlock_irqrestore(first, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
end_page_writeback(page);
return;
still_busy:
- bh_uptodate_unlock_irqrestore(first, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
+ return;
}
EXPORT_SYMBOL(end_buffer_async_write);
@@ -3328,7 +3336,6 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
- buffer_head_init_locks(ret);
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
diff --git a/fs/dcache.c b/fs/dcache.c
index ae2a83e..8ef74f3 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -19,7 +19,6 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
@@ -555,7 +554,7 @@ dentry_kill(struct dentry *dentry, int unlock_on_failure)
relock:
if (unlock_on_failure) {
spin_unlock(&dentry->d_lock);
- cpu_chill();
+ cpu_relax();
}
return dentry; /* try again with same dentry */
}
@@ -2392,7 +2391,7 @@ again:
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
- cpu_chill();
+ cpu_relax();
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
@@ -2847,9 +2846,9 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
u32 dlen = ACCESS_ONCE(name->len);
char *p;
- if (*buflen < dlen + 1)
- return -ENAMETOOLONG;
*buflen -= dlen + 1;
+ if (*buflen < 0)
+ return -ENAMETOOLONG;
p = *buffer -= dlen + 1;
*p++ = '/';
while (dlen--) {
@@ -2894,6 +2893,7 @@ static int prepend_path(const struct path *path,
restart:
bptr = *buffer;
blen = *buflen;
+ error = 0;
dentry = path->dentry;
vfsmnt = path->mnt;
mnt = real_mount(vfsmnt);
@@ -3122,19 +3122,22 @@ char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
/*
* Write full pathname from the root of the filesystem into the buffer.
*/
-static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
+static char *__dentry_path(struct dentry *d, char *buf, int buflen)
{
+ struct dentry *dentry;
char *end, *retval;
int len, seq = 0;
int error = 0;
+ if (buflen < 2)
+ goto Elong;
+
rcu_read_lock();
restart:
+ dentry = d;
end = buf + buflen;
len = buflen;
prepend(&end, &len, "\0", 1);
- if (buflen < 1)
- goto Elong;
/* Get '/' right */
retval = end-1;
*retval = '/';
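
The prepend_name() change above subtracts the component length from the running budget first and then tests the signed result, so an oversized name is caught after the length has been read exactly once. A simplified, non-RCU sketch:

/* Sketch only: a plain-C, non-RCU version of the decrement-then-check
 * shape; the kernel version reads the length with ACCESS_ONCE. */
#include <stdio.h>
#include <string.h>

static int prepend_name(char **buffer, int *buflen, const char *name)
{
        int dlen = (int)strlen(name);
        char *p;

        *buflen -= dlen + 1;            /* charge "/name" up front */
        if (*buflen < 0)
                return -1;              /* -ENAMETOOLONG in the kernel */
        p = *buffer -= dlen + 1;
        *p++ = '/';
        memcpy(p, name, dlen);
        return 0;
}

int main(void)
{
        char buf[16];
        char *end = buf + sizeof(buf);
        int len = sizeof(buf);

        if (prepend_name(&end, &len, "etc") == 0)
                printf("%.*s\n", (int)(buf + sizeof(buf) - end), end);
        return 0;
}
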
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index d90909e..a5e34dd 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -649,6 +649,7 @@ static void process_sctp_notification(struct connection *con,
struct msghdr *msg, char *buf)
{
union sctp_notification *sn = (union sctp_notification *)buf;
+ struct linger linger;
switch (sn->sn_header.sn_type) {
case SCTP_SEND_FAILED:
@@ -727,6 +728,13 @@ static void process_sctp_notification(struct connection *con,
}
add_sock(new_con->sock, new_con);
+ linger.l_onoff = 1;
+ linger.l_linger = 0;
+ ret = kernel_setsockopt(new_con->sock, SOL_SOCKET, SO_LINGER,
+ (char *)&linger, sizeof(linger));
+ if (ret < 0)
+ log_print("set socket option SO_LINGER failed");
+
log_print("connecting to %d sctp association %d",
nodeid, (int)sn->sn_assoc_change.sac_assoc_id);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 9691a6e..d76c974 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -505,12 +505,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
- int this_cpu = get_cpu_light();
+ int this_cpu = get_cpu();
ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
- put_cpu_light();
+ put_cpu();
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
diff --git a/fs/exec.c b/fs/exec.c
index eb02518..bb8afc1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -840,12 +840,10 @@ static int exec_mmap(struct mm_struct *mm)
}
}
task_lock(tsk);
- preempt_disable_rt();
active_mm = tsk->active_mm;
tsk->mm = mm;
tsk->active_mm = mm;
activate_mm(active_mm, mm);
- preempt_enable_rt();
task_unlock(tsk);
arch_pick_mmap_layout(mm);
if (old_mm) {
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e678549..8dd9659 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2616,6 +2616,27 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
+ /*
+ * If we're starting with an extent other than the last one in the
+ * node, we need to see if it shares a cluster with the extent to
+ * the right (towards the end of the file). If its leftmost cluster
+ * is this extent's rightmost cluster and it is not cluster aligned,
+ * we'll mark it as a partial that is not to be deallocated.
+ */
+
+ if (ex != EXT_LAST_EXTENT(eh)) {
+ ext4_fsblk_t current_pblk, right_pblk;
+ long long current_cluster, right_cluster;
+
+ current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
+ current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
+ right_pblk = ext4_ext_pblock(ex + 1);
+ right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
+ if (current_cluster == right_cluster &&
+ EXT4_PBLK_COFF(sbi, right_pblk))
+ *partial_cluster = -right_cluster;
+ }
+
trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
while (ex >= EXT_FIRST_EXTENT(eh) &&
@@ -2741,10 +2762,15 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
err = ext4_ext_correct_indexes(handle, inode, path);
/*
- * Free the partial cluster only if the current extent does not
- * reference it. Otherwise we might free used cluster.
+ * If there's a partial cluster and at least one extent remains in
+ * the leaf, free the partial cluster if it isn't shared with the
+ * current extent. If there's a partial cluster and no extents
+ * remain in the leaf, it can't be freed here. It can only be
+ * freed when it's possible to determine if it's not shared with
+ * any other extent - when the next leaf is processed or when space
+ * removal is complete.
*/
- if (*partial_cluster > 0 &&
+ if (*partial_cluster > 0 && eh->eh_entries &&
(EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
*partial_cluster)) {
int flags = get_default_free_blocks_flags(inode);
@@ -4159,7 +4185,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent newex, *ex, *ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_fsblk_t newblock = 0;
- int free_on_err = 0, err = 0, depth;
+ int free_on_err = 0, err = 0, depth, ret;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
@@ -4220,9 +4246,13 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
if (!ext4_ext_is_uninitialized(ex))
goto out;
- allocated = ext4_ext_handle_uninitialized_extents(
+ ret = ext4_ext_handle_uninitialized_extents(
handle, inode, map, path, flags,
allocated, newblock);
+ if (ret < 0)
+ err = ret;
+ else
+ allocated = ret;
goto out3;
}
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 55fe412..f173ef1 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -38,6 +38,7 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/aio.h>
+#include <linux/bitops.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -3922,18 +3923,20 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
void ext4_set_inode_flags(struct inode *inode)
{
unsigned int flags = EXT4_I(inode)->i_flags;
+ unsigned int new_fl = 0;
- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
if (flags & EXT4_SYNC_FL)
- inode->i_flags |= S_SYNC;
+ new_fl |= S_SYNC;
if (flags & EXT4_APPEND_FL)
- inode->i_flags |= S_APPEND;
+ new_fl |= S_APPEND;
if (flags & EXT4_IMMUTABLE_FL)
- inode->i_flags |= S_IMMUTABLE;
+ new_fl |= S_IMMUTABLE;
if (flags & EXT4_NOATIME_FL)
- inode->i_flags |= S_NOATIME;
+ new_fl |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
- inode->i_flags |= S_DIRSYNC;
+ new_fl |= S_DIRSYNC;
+ set_mask_bits(&inode->i_flags,
+ S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
}
/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
@@ -4444,7 +4447,12 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
return -EIO;
}
- if (wbc->sync_mode != WB_SYNC_ALL)
+ /*
+ * No need to force transaction in WB_SYNC_NONE mode. Also
+ * ext4_sync_fs() will force the commit after everything is
+ * written.
+ */
+ if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
return 0;
err = ext4_force_commit(inode->i_sb);
@@ -4454,7 +4462,11 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
err = __ext4_get_inode_loc(inode, &iloc, 0);
if (err)
return err;
- if (wbc->sync_mode == WB_SYNC_ALL)
+ /*
+ * sync(2) will flush the whole buffer cache. No need to do
+ * it here separately for each inode.
+ */
+ if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
sync_dirty_buffer(iloc.bh);
if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
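
ext4_set_inode_flags() now builds the new flag set locally and publishes it with set_mask_bits(), so readers never observe the window where all five bits were cleared. A sketch of that pattern, modeling set_mask_bits() as a C11 compare-and-swap loop, which is assumed here rather than copied from the kernel header:

/* Sketch only: flag values are illustrative and the CAS loop is an
 * assumed model of set_mask_bits(). */
#include <stdatomic.h>
#include <stdio.h>

#define S_SYNC   0x1u
#define S_APPEND 0x2u

static void set_mask_bits(_Atomic unsigned int *p,
                          unsigned int mask, unsigned int bits)
{
        unsigned int old = atomic_load(p), new;

        do {
                new = (old & ~mask) | bits;     /* one atomic publish */
        } while (!atomic_compare_exchange_weak(p, &old, new));
}

int main(void)
{
        _Atomic unsigned int i_flags = S_APPEND;

        set_mask_bits(&i_flags, S_SYNC | S_APPEND, S_SYNC);
        printf("flags = %#x\n", atomic_load(&i_flags));  /* 0x1 */
        return 0;
}
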
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 3595180..5bbec31 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -88,16 +88,29 @@ static inline struct inode *wb_inode(struct list_head *head)
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>
+static void bdi_wakeup_thread(struct backing_dev_info *bdi)
+{
+ spin_lock_bh(&bdi->wb_lock);
+ if (test_bit(BDI_registered, &bdi->state))
+ mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+ spin_unlock_bh(&bdi->wb_lock);
+}
+
static void bdi_queue_work(struct backing_dev_info *bdi,
struct wb_writeback_work *work)
{
trace_writeback_queue(bdi, work);
spin_lock_bh(&bdi->wb_lock);
+ if (!test_bit(BDI_registered, &bdi->state)) {
+ if (work->done)
+ complete(work->done);
+ goto out_unlock;
+ }
list_add_tail(&work->list, &bdi->work_list);
- spin_unlock_bh(&bdi->wb_lock);
-
mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+out_unlock:
+ spin_unlock_bh(&bdi->wb_lock);
}
static void
@@ -113,7 +126,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
trace_writeback_nowork(bdi);
- mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+ bdi_wakeup_thread(bdi);
return;
}
@@ -160,7 +173,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
* writeback as soon as there is no other work to do.
*/
trace_writeback_wake_background(bdi);
- mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+ bdi_wakeup_thread(bdi);
}
/*
@@ -1016,7 +1029,7 @@ void bdi_writeback_workfn(struct work_struct *work)
current->flags |= PF_SWAPWRITE;
if (likely(!current_is_workqueue_rescuer() ||
- list_empty(&bdi->bdi_list))) {
+ !test_bit(BDI_registered, &bdi->state))) {
/*
* The normal path. Keep writing back @bdi until its
* work_list is empty. Note that this path is also taken
@@ -1038,10 +1051,10 @@ void bdi_writeback_workfn(struct work_struct *work)
trace_writeback_pages_written(pages_written);
}
- if (!list_empty(&bdi->work_list) ||
- (wb_has_dirty_io(wb) && dirty_writeback_interval))
- queue_delayed_work(bdi_wq, &wb->dwork,
- msecs_to_jiffies(dirty_writeback_interval * 10));
+ if (!list_empty(&bdi->work_list))
+ mod_delayed_work(bdi_wq, &wb->dwork, 0);
+ else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
+ bdi_wakeup_thread_delayed(bdi);
current->flags &= ~PF_SWAPWRITE;
}
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index e1959ef..b5ebc2d 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -50,6 +50,8 @@ void fscache_objlist_add(struct fscache_object *obj)
struct fscache_object *xobj;
struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
+ ASSERT(RB_EMPTY_NODE(&obj->objlist_link));
+
write_lock(&fscache_object_list_lock);
while (*p) {
@@ -75,6 +77,9 @@ void fscache_objlist_add(struct fscache_object *obj)
*/
void fscache_objlist_remove(struct fscache_object *obj)
{
+ if (RB_EMPTY_NODE(&obj->objlist_link))
+ return;
+
write_lock(&fscache_object_list_lock);
BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 86d75a6..fec4134 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -314,6 +314,9 @@ void fscache_object_init(struct fscache_object *object,
object->cache = cache;
object->cookie = cookie;
object->parent = NULL;
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+ RB_CLEAR_NODE(&object->objlist_link);
+#endif
object->oob_event_mask = 0;
for (t = object->oob_table; t->events; t++)
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 95debd7..08c0304 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -129,8 +129,6 @@ void __log_wait_for_space(journal_t *journal)
if (journal->j_flags & JFS_ABORT)
return;
spin_unlock(&journal->j_state_lock);
- if (current->plug)
- io_schedule();
mutex_lock(&journal->j_checkpoint_mutex);
/*
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 8c40400..7f34f47 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -125,8 +125,6 @@ void __jbd2_log_wait_for_space(journal_t *journal)
if (journal->j_flags & JBD2_ABORT)
return;
write_unlock(&journal->j_state_lock);
- if (current->plug)
- io_schedule();
mutex_lock(&journal->j_checkpoint_mutex);
/*
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
index 16a5047..406d9cc 100644
--- a/fs/jffs2/compr_rtime.c
+++ b/fs/jffs2/compr_rtime.c
@@ -33,7 +33,7 @@ static int jffs2_rtime_compress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t *sourcelen, uint32_t *dstlen)
{
- short positions[256];
+ unsigned short positions[256];
int outpos = 0;
int pos=0;
@@ -74,7 +74,7 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t srclen, uint32_t destlen)
{
- short positions[256];
+ unsigned short positions[256];
int outpos = 0;
int pos=0;
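
Note: the rtime arrays hold byte offsets of the most recent occurrence of each byte value, so with a signed 16-bit element any offset past 32767 (possible once the page size exceeds 32 KiB) wraps negative and corrupts the back-reference computation; hence the switch to unsigned short. A runnable illustration of the wraparound:

    #include <stdio.h>

    int main(void)
    {
            short s = (short)40000;         /* offset past SHRT_MAX... */
            unsigned short u = 40000;       /* ...vs. the fixed type */

            /* on two's-complement targets: "signed: -25536  unsigned: 40000" */
            printf("signed: %d  unsigned: %u\n", s, (unsigned)u);
            return 0;
    }
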
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index e4619b0..fa35ff7 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -231,7 +231,7 @@ struct jffs2_tmp_dnode_info
uint32_t version;
uint32_t data_crc;
uint32_t partial_crc;
- uint16_t csize;
+ uint32_t csize;
uint16_t overlapped;
};
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 0331072..b6bd4af 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -179,6 +179,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
spin_unlock(&c->erase_completion_lock);
schedule();
+ remove_wait_queue(&c->erase_wait, &wait);
} else
spin_unlock(&c->erase_completion_lock);
} else if (ret)
@@ -211,20 +212,25 @@ out:
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
uint32_t *len, uint32_t sumsize)
{
- int ret = -EAGAIN;
+ int ret;
minsize = PAD(minsize);
jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
- spin_lock(&c->erase_completion_lock);
- while(ret == -EAGAIN) {
+ while (true) {
+ spin_lock(&c->erase_completion_lock);
ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
if (ret) {
jffs2_dbg(1, "%s(): looping, ret is %d\n",
__func__, ret);
}
+ spin_unlock(&c->erase_completion_lock);
+
+ if (ret == -EAGAIN)
+ cond_resched();
+ else
+ break;
}
- spin_unlock(&c->erase_completion_lock);
if (!ret)
ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
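
Note: the jffs2_reserve_space_gc() rework exists because cond_resched() may sleep, and sleeping while holding a spinlock is illegal; the loop therefore leaves the critical section on every pass before yielding. The general shape, as a hedged kernel-style sketch with a hypothetical struct ctx and try_make_space():

    int reserve(struct ctx *c)
    {
            int ret;

            for (;;) {
                    spin_lock(&c->lock);
                    ret = try_make_space(c);        /* may return -EAGAIN */
                    spin_unlock(&c->lock);

                    if (ret != -EAGAIN)
                            return ret;
                    cond_resched();                 /* safe: no locks held */
            }
    }
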
diff --git a/fs/namespace.c b/fs/namespace.c
index 22cbfab..84447db 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -23,7 +23,6 @@
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
-#include <linux/delay.h>
#include "pnode.h"
#include "internal.h"
@@ -316,11 +315,8 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
- preempt_enable();
- cpu_chill();
- preempt_disable();
- }
+ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
+ cpu_relax();
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
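
Note: __mnt_want_write() reverts from cpu_chill() to a plain cpu_relax() busy-wait on MNT_WRITE_HOLD. Roughly, in userspace C11 terms (hypothetical names; the kernel's ACCESS_ONCE() corresponds to a relaxed atomic load):

    #include <stdatomic.h>

    #define HOLD 1u         /* hypothetical stand-in for MNT_WRITE_HOLD */

    void wait_for_hold(atomic_uint *flags)
    {
            while (atomic_load_explicit(flags, memory_order_relaxed) & HOLD)
                    ;       /* the kernel loop calls cpu_relax() here */
    }
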
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e81a1ca..1402806 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -276,6 +276,15 @@ out_eof:
return -EBADCOOKIE;
}
+static bool
+nfs_readdir_inode_mapping_valid(struct nfs_inode *nfsi)
+{
+ if (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))
+ return false;
+ smp_rmb();
+ return !test_bit(NFS_INO_INVALIDATING, &nfsi->flags);
+}
+
static
int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
{
@@ -289,8 +298,8 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
struct nfs_open_dir_context *ctx = desc->file->private_data;
new_pos = desc->current_index + i;
- if (ctx->attr_gencount != nfsi->attr_gencount
- || (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))) {
+ if (ctx->attr_gencount != nfsi->attr_gencount ||
+ !nfs_readdir_inode_mapping_valid(nfsi)) {
ctx->duped = 0;
ctx->attr_gencount = nfsi->attr_gencount;
} else if (new_pos < desc->ctx->pos) {
@@ -1139,7 +1148,13 @@ out_zap_parent:
if (inode && S_ISDIR(inode->i_mode)) {
/* Purge readdir caches. */
nfs_zap_caches(inode);
- if (dentry->d_flags & DCACHE_DISCONNECTED)
+ /*
+ * We can't d_drop the root of a disconnected tree:
+ * its d_hash is on the s_anon list and d_drop() would hide
+ * it from shrink_dcache_for_unmount(), leading to busy
+ * inodes on unmount and further oopses.
+ */
+ if (IS_ROOT(dentry))
goto out_valid;
}
/* If we have submounts, don't unhash ! */
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 91ff089..af5f3ff 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -223,14 +223,31 @@ out:
* Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
* the iocb is still valid here if this is a synchronous request.
*/
-static void nfs_direct_complete(struct nfs_direct_req *dreq)
+static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
{
+ struct inode *inode = dreq->inode;
+
+ if (dreq->iocb && write) {
+ loff_t pos = dreq->iocb->ki_pos + dreq->count;
+
+ spin_lock(&inode->i_lock);
+ if (i_size_read(inode) < pos)
+ i_size_write(inode, pos);
+ spin_unlock(&inode->i_lock);
+ }
+
+ if (write)
+ nfs_zap_mapping(inode, inode->i_mapping);
+
+ inode_dio_done(inode);
+
if (dreq->iocb) {
long res = (long) dreq->error;
if (!res)
res = (long) dreq->count;
aio_complete(dreq->iocb, res, 0);
}
+
complete_all(&dreq->completion);
nfs_direct_req_release(dreq);
@@ -273,7 +290,7 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
}
out_put:
if (put_dreq(dreq))
- nfs_direct_complete(dreq);
+ nfs_direct_complete(dreq, false);
hdr->release(hdr);
}
@@ -403,6 +420,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
loff_t pos, bool uio)
{
struct nfs_pageio_descriptor desc;
+ struct inode *inode = dreq->inode;
ssize_t result = -EINVAL;
size_t requested_bytes = 0;
unsigned long seg;
@@ -411,6 +429,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
&nfs_direct_read_completion_ops);
get_dreq(dreq);
desc.pg_dreq = dreq;
+ atomic_inc(&inode->i_dio_count);
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
@@ -430,12 +449,13 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
* generic layer handle the completion.
*/
if (requested_bytes == 0) {
+ inode_dio_done(inode);
nfs_direct_req_release(dreq);
return result < 0 ? result : -EIO;
}
if (put_dreq(dreq))
- nfs_direct_complete(dreq);
+ nfs_direct_complete(dreq, false);
return 0;
}
@@ -473,12 +493,6 @@ out:
return result;
}
-static void nfs_inode_dio_write_done(struct inode *inode)
-{
- nfs_zap_mapping(inode, inode->i_mapping);
- inode_dio_done(inode);
-}
-
#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
@@ -594,8 +608,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
nfs_direct_write_reschedule(dreq);
break;
default:
- nfs_inode_dio_write_done(dreq->inode);
- nfs_direct_complete(dreq);
+ nfs_direct_complete(dreq, true);
}
}
@@ -611,8 +624,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
- nfs_inode_dio_write_done(inode);
- nfs_direct_complete(dreq);
+ nfs_direct_complete(dreq, true);
}
#endif
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 0ee22ab..fdeeb28 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -988,11 +988,11 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
if (ret < 0)
return ret;
}
- spin_lock(&inode->i_lock);
- nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
- if (S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode)) {
+ spin_lock(&inode->i_lock);
memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
- spin_unlock(&inode->i_lock);
+ spin_unlock(&inode->i_lock);
+ }
nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
nfs_fscache_wait_on_invalidate(inode);
@@ -1018,6 +1018,7 @@ static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
{
struct nfs_inode *nfsi = NFS_I(inode);
+ unsigned long *bitlock = &nfsi->flags;
int ret = 0;
/* swapfiles are not supposed to be shared. */
@@ -1029,12 +1030,46 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
if (ret < 0)
goto out;
}
- if (nfsi->cache_validity & NFS_INO_INVALID_DATA) {
- trace_nfs_invalidate_mapping_enter(inode);
- ret = nfs_invalidate_mapping(inode, mapping);
- trace_nfs_invalidate_mapping_exit(inode, ret);
+
+ /*
+ * We must clear NFS_INO_INVALID_DATA first to ensure that
+ * invalidations that come in while we're shooting down the mappings
+ * are respected. But, that leaves a race window where one revalidator
+ * can clear the flag, and then another checks it before the mapping
+ * gets invalidated. Fix that by serializing access to this part of
+ * the function.
+ *
+ * At the same time, we need to allow other tasks to see whether we
+ * might be in the middle of invalidating the pages, so we only set
+ * the bit lock here if it looks like we're going to be doing that.
+ */
+ for (;;) {
+ ret = wait_on_bit(bitlock, NFS_INO_INVALIDATING,
+ nfs_wait_bit_killable, TASK_KILLABLE);
+ if (ret)
+ goto out;
+ spin_lock(&inode->i_lock);
+ if (test_bit(NFS_INO_INVALIDATING, bitlock)) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+ if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
+ break;
+ spin_unlock(&inode->i_lock);
+ goto out;
}
+ set_bit(NFS_INO_INVALIDATING, bitlock);
+ smp_wmb();
+ nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
+ spin_unlock(&inode->i_lock);
+ trace_nfs_invalidate_mapping_enter(inode);
+ ret = nfs_invalidate_mapping(inode, mapping);
+ trace_nfs_invalidate_mapping_exit(inode, ret);
+
+ clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
+ smp_mb__after_clear_bit();
+ wake_up_bit(bitlock, NFS_INO_INVALIDATING);
out:
return ret;
}
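
Note: the nfs_revalidate_mapping() change turns NFS_INO_INVALIDATING into a bit lock: one task claims it under i_lock, clears NFS_INO_INVALID_DATA, invalidates the mapping, then releases with clear_bit_unlock()/wake_up_bit(), while losers sleep in wait_on_bit(). The smp_wmb() pairs with the smp_rmb() that readers such as nfs_write_pageuptodate() issue between the two checks. A userspace C11 reduction of just that ordering (hypothetical names):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define INVALID_DATA 1u         /* hypothetical flag bits */
    #define INVALIDATING 1u

    atomic_uint cache_validity = INVALID_DATA;
    atomic_uint flags;

    void begin_invalidate(void)     /* winner of the bit lock */
    {
            atomic_fetch_or(&flags, INVALIDATING);
            atomic_thread_fence(memory_order_release);      /* smp_wmb() */
            atomic_fetch_and(&cache_validity, ~INVALID_DATA);
            /* ...invalidate pages, then clear INVALIDATING and wake... */
    }

    bool mapping_valid(void)        /* reader, cf. nfs_write_pageuptodate() */
    {
            if (atomic_load(&cache_validity) & INVALID_DATA)
                    return false;
            atomic_thread_fence(memory_order_acquire);      /* smp_rmb() */
            return !(atomic_load(&flags) & INVALIDATING);
    }
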
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 4a1aafb..4612291 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -289,8 +289,8 @@ getout:
return acl;
}
-static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
- struct posix_acl *dfacl)
+static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+ struct posix_acl *dfacl)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_fattr *fattr;
@@ -373,6 +373,15 @@ out:
return status;
}
+int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+ struct posix_acl *dfacl)
+{
+ int ret;
+ ret = __nfs3_proc_setacls(inode, acl, dfacl);
+ return (ret == -EOPNOTSUPP) ? 0 : ret;
+
+}
+
int nfs3_proc_setacl(struct inode *inode, int type, struct posix_acl *acl)
{
struct posix_acl *alloc = NULL, *dfacl = NULL;
@@ -406,7 +415,7 @@ int nfs3_proc_setacl(struct inode *inode, int type, struct posix_acl *acl)
if (IS_ERR(alloc))
goto fail;
}
- status = nfs3_proc_setacls(inode, acl, dfacl);
+ status = __nfs3_proc_setacls(inode, acl, dfacl);
posix_acl_release(alloc);
return status;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 28842ab..fdfd591 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -249,6 +249,7 @@ static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *ser
extern int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
struct rpc_task *task);
+extern int nfs41_sequence_done(struct rpc_task *, struct nfs4_sequence_res *);
extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index b86464b..394b0a0 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -335,8 +335,11 @@ static void filelayout_read_call_done(struct rpc_task *task, void *data)
dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags) &&
- task->tk_status == 0)
+ task->tk_status == 0) {
+ if (rdata->res.seq_res.sr_slot != NULL)
+ nfs41_sequence_done(task, &rdata->res.seq_res);
return;
+ }
/* Note this may cause RPC to be resent */
rdata->header->mds_ops->rpc_call_done(task, data);
@@ -442,8 +445,11 @@ static void filelayout_write_call_done(struct rpc_task *task, void *data)
struct nfs_write_data *wdata = data;
if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags) &&
- task->tk_status == 0)
+ task->tk_status == 0) {
+ if (wdata->res.seq_res.sr_slot != NULL)
+ nfs41_sequence_done(task, &wdata->res.seq_res);
return;
+ }
/* Note this may cause RPC to be resent */
wdata->header->mds_ops->rpc_call_done(task, data);
@@ -1216,17 +1222,17 @@ static void filelayout_recover_commit_reqs(struct list_head *dst,
struct pnfs_commit_bucket *b;
int i;
- /* NOTE cinfo->lock is NOT held, relying on fact that this is
- * only called on single thread per dreq.
- * Can't take the lock because need to do pnfs_put_lseg
- */
+ spin_lock(cinfo->lock);
for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
+ spin_unlock(cinfo->lock);
pnfs_put_lseg(b->wlseg);
b->wlseg = NULL;
+ spin_lock(cinfo->lock);
}
}
cinfo->ds->nwritten = 0;
+ spin_unlock(cinfo->lock);
}
static unsigned int
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index c7c295e5..efac602 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -95,7 +95,7 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
b6 = (struct sockaddr_in6 *)addr2;
/* LINKLOCAL addresses must have matching scope_id */
- if (ipv6_addr_scope(&a6->sin6_addr) ==
+ if (ipv6_addr_src_scope(&a6->sin6_addr) ==
IPV6_ADDR_SCOPE_LINKLOCAL &&
a6->sin6_scope_id != b6->sin6_scope_id)
return false;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a536517..bcd42fb 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -585,7 +585,7 @@ out_unlock:
nfs41_server_notify_highest_slotid_update(session->clp);
}
-static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
struct nfs4_session *session;
struct nfs4_slot *slot;
@@ -685,6 +685,7 @@ out_retry:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
return 0;
}
+EXPORT_SYMBOL_GPL(nfs41_sequence_done);
static int nfs4_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
@@ -5779,21 +5780,20 @@ struct nfs_release_lockowner_data {
struct nfs4_lock_state *lsp;
struct nfs_server *server;
struct nfs_release_lockowner_args args;
- struct nfs4_sequence_args seq_args;
- struct nfs4_sequence_res seq_res;
+ struct nfs_release_lockowner_res res;
};
static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
{
struct nfs_release_lockowner_data *data = calldata;
nfs40_setup_sequence(data->server,
- &data->seq_args, &data->seq_res, task);
+ &data->args.seq_args, &data->res.seq_res, task);
}
static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
{
struct nfs_release_lockowner_data *data = calldata;
- nfs40_sequence_done(task, &data->seq_res);
+ nfs40_sequence_done(task, &data->res.seq_res);
}
static void nfs4_release_lockowner_release(void *calldata)
@@ -5822,7 +5822,6 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
data = kmalloc(sizeof(*data), GFP_NOFS);
if (!data)
return -ENOMEM;
- nfs4_init_sequence(&data->seq_args, &data->seq_res, 0);
data->lsp = lsp;
data->server = server;
data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
@@ -5830,6 +5829,8 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
data->args.lock_owner.s_dev = server->s_dev;
msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
return 0;
}
@@ -7273,7 +7274,14 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
return;
server = NFS_SERVER(lrp->args.inode);
- if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+ switch (task->tk_status) {
+ default:
+ task->tk_status = 0;
+ case 0:
+ break;
+ case -NFS4ERR_DELAY:
+ if (nfs4_async_handle_error(task, server, NULL) != -EAGAIN)
+ break;
rpc_restart_call_prepare(task);
return;
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index b2f842d..1c2beb1 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3405,7 +3405,7 @@ static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint
{
__be32 *p;
- *res = ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL;
+ *res = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 89fe741..59f838c 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -36,6 +36,7 @@
__print_flags(v, "|", \
{ 1 << NFS_INO_ADVISE_RDPLUS, "ADVISE_RDPLUS" }, \
{ 1 << NFS_INO_STALE, "STALE" }, \
+ { 1 << NFS_INO_INVALIDATING, "INVALIDATING" }, \
{ 1 << NFS_INO_FLUSHING, "FLUSHING" }, \
{ 1 << NFS_INO_FSCACHE, "FSCACHE" }, \
{ 1 << NFS_INO_COMMIT, "COMMIT" }, \
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 28466be..c6aa89f 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -909,9 +909,14 @@ bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx)
*/
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
if (nfs_have_delegated_attributes(inode))
goto out;
- if (NFS_I(inode)->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
+ if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
+ return false;
+ smp_rmb();
+ if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
return false;
out:
return PageUptodate(page) != 0;
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 33f0d86..d267ea6 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -108,7 +108,8 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
- flags = bh_uptodate_lock_irqsave(first);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -123,7 +124,8 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bh_uptodate_unlock_irqrestore(first, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
@@ -144,13 +146,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
recs = PAGE_CACHE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
- local_irq_save_nort(flags);
+ local_irq_save(flags);
kaddr = kmap_atomic(page);
for (i = 0; i < recs; i++)
post_read_mst_fixup((NTFS_RECORD*)(kaddr +
i * rec_size), rec_size);
kunmap_atomic(kaddr);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
flush_dcache_page(page);
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
@@ -158,7 +160,9 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
unlock_page(page);
return;
still_busy:
- bh_uptodate_unlock_irqrestore(first, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
+ return;
}
/**
diff --git a/fs/proc/page.c b/fs/proc/page.c
index b8730d9..2a8cc94 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -121,7 +121,7 @@ u64 stable_page_flags(struct page *page)
* just checks PG_head/PG_tail, so we need to check PageLRU to make
* sure a given page is a thp, not a non-huge compound page.
*/
- else if (PageTransCompound(page) && PageLRU(compound_trans_head(page)))
+ else if (PageTransCompound(page) && PageLRU(compound_head(page)))
u |= 1 << KPF_THP;
/*
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 106a835..9fa2154 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -235,6 +235,7 @@ void __init proc_device_tree_init(void)
return;
root = of_find_node_by_path("/");
if (root == NULL) {
+ remove_proc_entry("device-tree", NULL);
pr_debug("/proc/device-tree: can't find root\n");
return;
}
diff --git a/fs/timerfd.c b/fs/timerfd.c
index d20e45a..9293121 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -380,10 +380,7 @@ static int do_timerfd_settime(int ufd, int flags,
break;
}
spin_unlock_irq(&ctx->wqh.lock);
- if (isalarm(ctx))
- hrtimer_wait_for_timer(&ctx->t.alarm.timer);
- else
- hrtimer_wait_for_timer(&ctx->t.tmr);
+ cpu_relax();
}
/*
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 20bf8e8..a6fcbd2 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1335,7 +1335,7 @@ xfs_da3_fixhashpath(
node = blk->bp->b_addr;
xfs_da3_node_hdr_from_disk(&nodehdr, node);
btree = xfs_da3_node_tree_p(node);
- if (be32_to_cpu(btree->hashval) == lasthash)
+ if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
break;
blk->hashval = lasthash;
btree[blk->index].hashval = cpu_to_be32(lasthash);
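
Note: the xfs_da3_fixhashpath() fix is a plain indexing bug: the old test read the hash of the first btree entry instead of the entry at blk->index, so the early-exit check watched the wrong slot. In miniature (hypothetical types):

    struct ent { unsigned int hashval; };

    int hash_unchanged(const struct ent *btree, int index, unsigned int lasthash)
    {
            /* buggy: btree->hashval == lasthash  -- always entry 0 */
            return btree[index].hashval == lasthash;        /* fixed */
    }
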
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 0ef299d..68534ef 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -72,7 +72,6 @@
#define acpi_cache_t struct kmem_cache
#define acpi_spinlock spinlock_t *
-#define acpi_raw_spinlock raw_spinlock_t *
#define acpi_cpu_flags unsigned long
#else /* !__KERNEL__ */
@@ -175,19 +174,6 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
lock ? AE_OK : AE_NO_MEMORY; \
})
-#define acpi_os_create_raw_lock(__handle) \
-({ \
- raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
- \
- if (lock) { \
- *(__handle) = lock; \
- raw_spin_lock_init(*(__handle)); \
- } \
- lock ? AE_OK : AE_NO_MEMORY; \
-})
-
-#define acpi_os_delete_raw_lock(__handle) kfree(__handle)
-
#endif /* __KERNEL__ */
#endif /* __ACLINUX_H__ */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index aee7fd2..7d10f96 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -202,20 +202,6 @@ extern void warn_slowpath_null(const char *file, const int line);
# define WARN_ON_SMP(x) ({0;})
#endif
-#ifdef CONFIG_PREEMPT_RT_BASE
-# define BUG_ON_RT(c) BUG_ON(c)
-# define BUG_ON_NONRT(c) do { } while (0)
-# define WARN_ON_RT(condition) WARN_ON(condition)
-# define WARN_ON_NONRT(condition) do { } while (0)
-# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
-#else
-# define BUG_ON_RT(c) do { } while (0)
-# define BUG_ON_NONRT(c) BUG_ON(c)
-# define WARN_ON_RT(condition) do { } while (0)
-# define WARN_ON_NONRT(condition) WARN_ON(condition)
-# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
-#endif
-
#endif /* __ASSEMBLY__ */
#endif
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 24f4995..ec5d737 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1108,6 +1108,8 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
int GTF_2C, int GTF_K, int GTF_2J);
extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
+extern void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref);
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5f66d51..a4cf599 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -95,7 +95,7 @@ struct backing_dev_info {
unsigned int max_ratio, max_prop_frac;
struct bdi_writeback wb; /* default writeback info for this bdi */
- spinlock_t wb_lock; /* protects work_list */
+ spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */
struct list_head work_list;
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3b6b82..c1dde8e 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -185,6 +185,21 @@ static inline unsigned long __ffs64(u64 word)
#ifdef __KERNEL__
+#ifndef set_mask_bits
+#define set_mask_bits(ptr, _mask, _bits) \
+({ \
+ const typeof(*ptr) mask = (_mask), bits = (_bits); \
+ typeof(*ptr) old, new; \
+ \
+ do { \
+ old = ACCESS_ONCE(*ptr); \
+ new = (old & ~mask) | bits; \
+ } while (cmpxchg(ptr, old, new) != old); \
+ \
+ new; \
+})
+#endif
+
#ifndef find_last_bit
/**
* find_last_bit - find the last set bit in a memory region
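
Note: set_mask_bits() atomically replaces the bits selected by _mask with _bits, retrying the cmpxchg() until no other CPU has raced, and yields the new word. A runnable userspace reduction using GCC's __sync CAS builtin in place of cmpxchg():

    #include <stdio.h>

    static unsigned long set_mask_bits_demo(unsigned long *ptr,
                                            unsigned long mask,
                                            unsigned long bits)
    {
            unsigned long old, new;

            do {
                    old = *(volatile unsigned long *)ptr;   /* ACCESS_ONCE() */
                    new = (old & ~mask) | bits;
            } while (!__sync_bool_compare_and_swap(ptr, old, new));

            return new;
    }

    int main(void)
    {
            unsigned long word = 0xff00;

            /* rewrite the low byte to 0x2a, leaving the high byte alone */
            printf("0x%lx\n", set_mask_bits_demo(&word, 0xff, 0x2a));
            return 0;       /* prints 0xff2a */
    }
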
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index cac4973..d77797a 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -75,52 +75,8 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
-#ifdef CONFIG_PREEMPT_RT_BASE
- spinlock_t b_uptodate_lock;
-#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
- defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
- spinlock_t b_state_lock;
- spinlock_t b_journal_head_lock;
-#endif
-#endif
};
-static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
-{
- unsigned long flags;
-
-#ifndef CONFIG_PREEMPT_RT_BASE
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
-#else
- spin_lock_irqsave(&bh->b_uptodate_lock, flags);
-#endif
- return flags;
-}
-
-static inline void
-bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
-{
-#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
- local_irq_restore(flags);
-#else
- spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
-#endif
-}
-
-static inline void buffer_head_init_locks(struct buffer_head *bh)
-{
-#ifdef CONFIG_PREEMPT_RT_BASE
- spin_lock_init(&bh->b_uptodate_lock);
-#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
- defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
- spin_lock_init(&bh->b_state_lock);
- spin_lock_init(&bh->b_journal_head_lock);
-#endif
-#endif
-}
-
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 7c1420b..6ade97d 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -157,7 +157,7 @@ struct ceph_msg {
bool front_is_vmalloc;
bool more_to_follow;
bool needs_out_seq;
- int front_max;
+ int front_alloc_len;
unsigned long ack_stamp; /* tx: when we were acked */
struct ceph_msgpool *pool;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 8f47625..4fb6a89 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -138,6 +138,7 @@ struct ceph_osd_request {
__le64 *r_request_pool;
void *r_request_pgid;
__le32 *r_request_attempts;
+ bool r_paused;
struct ceph_eversion *r_request_reassert_version;
int r_result;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3561d30..7b3a119 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -169,6 +169,8 @@ struct cgroup {
*
* The ID of the root cgroup is always 0, and a new cgroup
* will be assigned with a smallest available ID.
+ *
+ * Allocating/Removing ID must be protected by cgroup_mutex.
*/
int id;
diff --git a/include/linux/completion.h b/include/linux/completion.h
index eb2d4ac..3cd574d 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -8,7 +8,7 @@
* See kernel/sched/core.c for details.
*/
-#include <linux/wait-simple.h>
+#include <linux/wait.h>
/*
* struct completion - structure used to maintain state for a "completion"
@@ -24,11 +24,11 @@
*/
struct completion {
unsigned int done;
- struct swait_head wait;
+ wait_queue_head_t wait;
};
#define COMPLETION_INITIALIZER(work) \
- { 0, SWAIT_HEAD_INITIALIZER((work).wait) }
+ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })
@@ -73,7 +73,7 @@ struct completion {
static inline void init_completion(struct completion *x)
{
x->done = 0;
- init_swait_head(&x->wait);
+ init_waitqueue_head(&x->wait);
}
extern void wait_for_completion(struct completion *);
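
Note: the completion revert swaps the RT simple-waitqueue back for a standard wait_queue_head_t; usage is unchanged. A hedged kernel-style sketch of the API (worker_ctx and both threads are hypothetical; not buildable stand-alone):

    struct worker_ctx {
            struct completion ready;
    };

    void consumer(struct worker_ctx *ctx)   /* thread A */
    {
            init_completion(&ctx->ready);
            /* ...start thread B... */
            wait_for_completion(&ctx->ready);  /* sleeps until B signals */
    }

    void producer(struct worker_ctx *ctx)   /* thread B */
    {
            /* ...produce the result... */
            complete(&ctx->ready);          /* wakes one waiter */
    }
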
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 0c2b05c..801ff9e 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -179,8 +179,6 @@ extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
-extern void pin_current_cpu(void);
-extern void unpin_current_cpu(void);
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
@@ -208,8 +206,6 @@ static inline void cpu_hotplug_done(void) {}
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
-static inline void pin_current_cpu(void) { }
-static inline void unpin_current_cpu(void) { }
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 37caab3..a6ecb34 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -52,10 +52,4 @@ static inline void ssleep(unsigned int seconds)
msleep(seconds * 1000);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-extern void cpu_chill(void);
-#else
-# define cpu_chill() cpu_relax()
-#endif
-
#endif /* defined(_LINUX_DELAY_H) */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index ffac70a..8439a16 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -613,8 +613,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info);
extern int unlink_framebuffer(struct fb_info *fb_info);
-extern void remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary);
+extern int remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
extern int fb_show_logo(struct fb_info *fb_info, int rotate);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 90209e6..20aebdb 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -56,9 +56,6 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
- unsigned short migrate_disable;
- unsigned short padding;
- unsigned char preempt_lazy_count;
};
#define FTRACE_MAX_EVENT \
@@ -328,10 +325,6 @@ enum {
FILTER_TRACE_FN,
};
-#define EVENT_STORAGE_SIZE 128
-extern struct mutex event_storage_mutex;
-extern char event_storage[EVENT_STORAGE_SIZE];
-
extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
const char *name, int offset, int size,
diff --git a/include/linux/futex.h b/include/linux/futex.h
index b0d95ca..6435f46 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -55,7 +55,11 @@ union futex_key {
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
extern void exit_pi_state_list(struct task_struct *curr);
+#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
+#define futex_cmpxchg_enabled 1
+#else
extern int futex_cmpxchg_enabled;
+#endif
#else
static inline void exit_robust_list(struct task_struct *curr)
{
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 821d523..7fb31da 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -7,7 +7,6 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
-#include <linux/sched.h>
#include <asm/cacheflush.h>
@@ -86,51 +85,32 @@ static inline void __kunmap_atomic(void *addr)
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
-#ifndef CONFIG_PREEMPT_RT_FULL
DECLARE_PER_CPU(int, __kmap_atomic_idx);
-#endif
static inline int kmap_atomic_idx_push(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-# ifdef CONFIG_DEBUG_HIGHMEM
+#ifdef CONFIG_DEBUG_HIGHMEM
WARN_ON_ONCE(in_irq() && !irqs_disabled());
BUG_ON(idx > KM_TYPE_NR);
-# endif
- return idx;
-#else
- current->kmap_idx++;
- BUG_ON(current->kmap_idx > KM_TYPE_NR);
- return current->kmap_idx - 1;
#endif
+ return idx;
}
static inline int kmap_atomic_idx(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
return __this_cpu_read(__kmap_atomic_idx) - 1;
-#else
- return current->kmap_idx - 1;
-#endif
}
static inline void kmap_atomic_idx_pop(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
-# ifdef CONFIG_DEBUG_HIGHMEM
+#ifdef CONFIG_DEBUG_HIGHMEM
int idx = __this_cpu_dec_return(__kmap_atomic_idx);
BUG_ON(idx < 0);
-# else
- __this_cpu_dec(__kmap_atomic_idx);
-# endif
#else
- current->kmap_idx--;
-# ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(current->kmap_idx < 0);
-# endif
+ __this_cpu_dec(__kmap_atomic_idx);
#endif
}
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index bdbf77db..d19a5c2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -111,11 +111,6 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
- struct list_head cb_entry;
- int irqsafe;
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- ktime_t praecox;
-#endif
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
@@ -152,7 +147,6 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
- struct list_head expired;
ktime_t resolution;
ktime_t (*get_time)(void);
ktime_t softirq_time;
@@ -196,9 +190,6 @@ struct hrtimer_cpu_base {
unsigned long nr_hangs;
ktime_t max_hang_time;
#endif
-#ifdef CONFIG_PREEMPT_RT_BASE
- wait_queue_head_t wait;
-#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
};
@@ -394,13 +385,6 @@ static inline int hrtimer_restart(struct hrtimer *timer)
return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
-/* Softirq preemption could deadlock timer removal */
-#ifdef CONFIG_PREEMPT_RT_BASE
- extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
-#else
-# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
-#endif
-
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
@@ -461,8 +445,9 @@ extern int schedule_hrtimeout_range_clock(ktime_t *expires,
unsigned long delta, const enum hrtimer_mode mode, int clock);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
-/* Called from the periodic timer tick */
+/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);
+extern void hrtimer_run_pending(void);
/* Bootup initialization: */
extern void __init hrtimers_init(void);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 3935428..a291552 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -156,23 +156,6 @@ static inline int hpage_nr_pages(struct page *page)
return HPAGE_PMD_NR;
return 1;
}
-static inline struct page *compound_trans_head(struct page *page)
-{
- if (PageTail(page)) {
- struct page *head;
- head = page->first_page;
- smp_rmb();
- /*
- * head may be a dangling pointer.
- * __split_huge_page_refcount clears PageTail before
- * overwriting first_page, so if PageTail is still
- * there it means the head pointer isn't dangling.
- */
- if (PageTail(page))
- return head;
- }
- return page;
-}
extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd, pmd_t *pmdp);
@@ -202,7 +185,6 @@ static inline int split_huge_page(struct page *page)
do { } while (0)
#define split_huge_page_pmd_mm(__mm, __address, __pmd) \
do { } while (0)
-#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 267527b..871a213 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -92,14 +92,10 @@ void idr_init(struct idr *idp);
* Each idr_preload() should be matched with an invocation of this
* function. See idr_preload() for details.
*/
-#ifdef CONFIG_PREEMPT_RT_FULL
-void idr_preload_end(void);
-#else
static inline void idr_preload_end(void)
{
preempt_enable();
}
-#endif
/**
* idr_find - return pointer for given id
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 766558a..5cd0f09 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -143,16 +143,9 @@ extern struct task_group root_task_group;
# define INIT_PERF_EVENTS(tsk)
#endif
-#ifdef CONFIG_PREEMPT_RT_BASE
-# define INIT_TIMER_LIST .posix_timer_list = NULL,
-#else
-# define INIT_TIMER_LIST
-#endif
-
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \
- .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \
- .vtime_seq = SEQCNT_ZERO, \
+ .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
.vtime_snap = 0, \
.vtime_snap_whence = VTIME_SYS,
#else
@@ -214,7 +207,6 @@ extern struct task_group root_task_group;
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
- INIT_TIMER_LIST \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a2609fb..5e865b5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -58,7 +58,6 @@
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
* resume time.
- * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SHARED 0x00000080
@@ -72,7 +71,6 @@
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
-#define IRQF_NO_SOFTIRQ_CALL 0x00040000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
@@ -178,7 +176,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
-# define local_irq_enable_in_hardirq() local_irq_enable_nort()
+# define local_irq_enable_in_hardirq() local_irq_enable()
#endif
extern void disable_irq_nosync(unsigned int irq);
@@ -222,7 +220,6 @@ struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
struct work_struct work;
- struct list_head list;
void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
void (*release)(struct kref *ref);
};
@@ -317,13 +314,9 @@ static inline int disable_irq_wake(unsigned int irq)
#ifdef CONFIG_IRQ_FORCED_THREADING
-# ifndef CONFIG_PREEMPT_RT_BASE
extern bool force_irqthreads;
-# else
-# define force_irqthreads (true)
-# endif
#else
-#define force_irqthreads (false)
+#define force_irqthreads (0)
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
@@ -379,14 +372,8 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
-#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-static inline void thread_do_softirq(void) { do_softirq(); }
-#else
-extern void thread_do_softirq(void);
-#endif
-
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
@@ -394,8 +381,6 @@ extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
-extern void softirq_check_pending_idle(void);
-
/* This is the worklist that queues up per-cpu softirq work.
*
* send_remote_sendirq() adds work to these lists, and
@@ -436,9 +421,8 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
- * If this tasklet is already running on another CPU, it is rescheduled
- for later.
- * Schedule must not be called from the tasklet itself (a lockup occurs)
+ * If this tasklet is already running on another CPU (or schedule is called
+ from tasklet itself), it is rescheduled for later.
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
@@ -463,36 +447,27 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
- TASKLET_STATE_PENDING /* Tasklet is pending */
+ TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
};
-#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
-#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
-#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
-
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
-static inline int tasklet_tryunlock(struct tasklet_struct *t)
-{
- return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
-}
-
static inline void tasklet_unlock(struct tasklet_struct *t)
{
smp_mb__before_clear_bit();
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
-extern void tasklet_unlock_wait(struct tasklet_struct *t);
-
+static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+}
#else
#define tasklet_trylock(t) 1
-#define tasklet_tryunlock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
@@ -541,8 +516,17 @@ static inline void tasklet_disable(struct tasklet_struct *t)
smp_mb();
}
-extern void tasklet_enable(struct tasklet_struct *t);
-extern void tasklet_hi_enable(struct tasklet_struct *t);
+static inline void tasklet_enable(struct tasklet_struct *t)
+{
+ smp_mb__before_atomic_dec();
+ atomic_dec(&t->count);
+}
+
+static inline void tasklet_hi_enable(struct tasklet_struct *t)
+{
+ smp_mb__before_atomic_dec();
+ atomic_dec(&t->count);
+}
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
@@ -574,12 +558,6 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
tasklet_kill(&ttimer->tasklet);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-extern void softirq_early_init(void);
-#else
-static inline void softirq_early_init(void) { }
-#endif
-
/*
* Autoprobing for irqs:
*
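
Note: the tasklet hunks restore the plain count-based gate: tasklet_disable() raises t->count and waits for any running instance, the softirq core only runs tasklets whose count is zero, and tasklet_enable() drops the count again behind a barrier so prior writes are visible before the tasklet can run. Typical usage, as a hedged kernel-style sketch:

    void touch_shared_state(struct tasklet_struct *t)
    {
            tasklet_disable(t);     /* count++, waits for a running instance */
            /* ...safely modify data the tasklet handler also uses... */
            tasklet_enable(t);      /* count--, tasklet may run again */
    }
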
diff --git a/include/linux/irq.h b/include/linux/irq.h
index e2d8789..56bb0dc 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -70,7 +70,6 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
* IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
* IRQ_NESTED_TRHEAD - Interrupt nests into another thread
* IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
- * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -95,14 +94,12 @@ enum {
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
IRQ_PER_CPU_DEVID = (1 << 17),
- IRQ_NO_SOFTIRQ_CALL = (1 << 18),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- IRQ_NO_SOFTIRQ_CALL)
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 60c19ee..6601702 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -16,7 +16,6 @@
#define IRQ_WORK_BUSY 2UL
#define IRQ_WORK_FLAGS 3UL
#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
-#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */
struct irq_work {
unsigned long flags;
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index da992bc..56fb646 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -52,7 +52,6 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
- u64 random_ip;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 0977829..d176d65 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -25,6 +25,8 @@
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
@@ -37,15 +39,9 @@
# define trace_softirqs_enabled(p) 0
# define trace_hardirq_enter() do { } while (0)
# define trace_hardirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
-#endif
-
-#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
-#else
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
+# define INIT_TRACE_IRQFLAGS
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
@@ -151,23 +147,4 @@
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
-/*
- * local_irq* variants depending on RT/!RT
- */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define local_irq_disable_nort() do { } while (0)
-# define local_irq_enable_nort() do { } while (0)
-# define local_irq_save_nort(flags) local_save_flags(flags)
-# define local_irq_restore_nort(flags) (void)(flags)
-# define local_irq_disable_rt() local_irq_disable()
-# define local_irq_enable_rt() local_irq_enable()
-#else
-# define local_irq_disable_nort() local_irq_disable()
-# define local_irq_enable_nort() local_irq_enable()
-# define local_irq_save_nort(flags) local_irq_save(flags)
-# define local_irq_restore_nort(flags) local_irq_restore(flags)
-# define local_irq_disable_rt() do { } while (0)
-# define local_irq_enable_rt() do { } while (0)
-#endif
-
#endif
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
index a90a6f5..3dc5343 100644
--- a/include/linux/jbd_common.h
+++ b/include/linux/jbd_common.h
@@ -15,56 +15,32 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_State, &bh->b_state);
-#else
- spin_lock(&bh->b_state_lock);
-#endif
}
static inline int jbd_trylock_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_trylock(BH_State, &bh->b_state);
-#else
- return spin_trylock(&bh->b_state_lock);
-#endif
}
static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_is_locked(BH_State, &bh->b_state);
-#else
- return spin_is_locked(&bh->b_state_lock);
-#endif
}
static inline void jbd_unlock_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_State, &bh->b_state);
-#else
- spin_unlock(&bh->b_state_lock);
-#endif
}
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_JournalHead, &bh->b_state);
-#else
- spin_lock(&bh->b_journal_head_lock);
-#endif
}
static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_JournalHead, &bh->b_state);
-#else
- spin_unlock(&bh->b_journal_head_lock);
-#endif
}
#endif
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 006627b..a507907 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -49,8 +49,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \
- !defined(CONFIG_PREEMPT_BASE)
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
struct static_key {
atomic_t enabled;
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 680ad23..7f6fe6e 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -115,7 +115,7 @@ extern int kdb_trap_printk;
extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
extern __printf(1, 2) int kdb_printf(const char *, ...);
typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
-#define in_kdb_printk() (kdb_trap_printk)
+
extern void kdb_init(int level);
/* Access to kdb specific polling devices */
@@ -150,7 +150,6 @@ extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
extern int kdb_unregister(char *);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
-#define in_kdb_printk() (0)
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index c34e608..672ddc4 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -412,7 +412,6 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
- SYSTEM_SUSPEND,
} system_state;
#define TAINT_PROPRIETARY_MODULE 0
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index d2c0d6d..0d24e93 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -42,37 +42,22 @@
#endif
struct lglock {
-#ifndef CONFIG_PREEMPT_RT_FULL
arch_spinlock_t __percpu *lock;
-#else
- struct rt_mutex __percpu *lock;
-#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key lock_key;
struct lockdep_map lock_dep_map;
#endif
};
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define DEFINE_LGLOCK(name) \
+#define DEFINE_LGLOCK(name) \
static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
= __ARCH_SPIN_LOCK_UNLOCKED; \
struct lglock name = { .lock = &name ## _lock }
-# define DEFINE_STATIC_LGLOCK(name) \
+#define DEFINE_STATIC_LGLOCK(name) \
static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
= __ARCH_SPIN_LOCK_UNLOCKED; \
static struct lglock name = { .lock = &name ## _lock }
-#else
-
-# define DEFINE_LGLOCK(name) \
- static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \
- struct lglock name = { .lock = &name ## _lock }
-
-# define DEFINE_STATIC_LGLOCK(name) \
- static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \
- static struct lglock name = { .lock = &name ## _lock }
-#endif
void lg_lock_init(struct lglock *lg, char *name);
void lg_local_lock(struct lglock *lg);
diff --git a/include/linux/list.h b/include/linux/list.h
index 885943e..f4d8a2f 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -373,17 +373,6 @@ static inline void list_splice_tail_init(struct list_head *list,
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
/**
- * list_last_entry - get the last element from a list
- * @ptr: the list head to take the element from.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
- *
- * Note, that list is expected to be not empty.
- */
-#define list_last_entry(ptr, type, member) \
- list_entry((ptr)->prev, type, member)
-
-/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index d8876a0..2eb8855 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -2,7 +2,6 @@
#define _LINUX_LIST_BL_H
#include <linux/list.h>
-#include <linux/spinlock.h>
#include <linux/bit_spinlock.h>
/*
@@ -33,22 +32,13 @@
struct hlist_bl_head {
struct hlist_bl_node *first;
-#ifdef CONFIG_PREEMPT_RT_BASE
- raw_spinlock_t lock;
-#endif
};
struct hlist_bl_node {
struct hlist_bl_node *next, **pprev;
};
-
-static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
-{
- h->first = NULL;
-#ifdef CONFIG_PREEMPT_RT_BASE
- raw_spin_lock_init(&h->lock);
-#endif
-}
+#define INIT_HLIST_BL_HEAD(ptr) \
+ ((ptr)->first = NULL)
static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
@@ -127,26 +117,12 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(0, (unsigned long *)b);
-#else
- raw_spin_lock(&b->lock);
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- __set_bit(0, (unsigned long *)b);
-#endif
-#endif
}
static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
__bit_spin_unlock(0, (unsigned long *)b);
-#else
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- __clear_bit(0, (unsigned long *)b);
-#endif
- raw_spin_unlock(&b->lock);
-#endif
}
static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
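
Note: hlist_bl locking works because every node is at least word-aligned, so bit 0 of a valid ->first pointer is always zero and can double as the bucket spinlock; readers must strip it before dereferencing, as the real hlist_bl_first() does. A hedged sketch of that masking:

    static inline struct hlist_bl_node *first_node(struct hlist_bl_head *h)
    {
            return (struct hlist_bl_node *)
                    ((unsigned long)h->first & ~1UL);   /* strip lock bit */
    }
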
diff --git a/include/linux/locallock.h b/include/linux/locallock.h
deleted file mode 100644
index 21653e9..0000000
--- a/include/linux/locallock.h
+++ /dev/null
@@ -1,270 +0,0 @@
-#ifndef _LINUX_LOCALLOCK_H
-#define _LINUX_LOCALLOCK_H
-
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
-
-#ifdef CONFIG_PREEMPT_RT_BASE
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define LL_WARN(cond) WARN_ON(cond)
-#else
-# define LL_WARN(cond) do { } while (0)
-#endif
-
-/*
- * per cpu lock based substitute for local_irq_*()
- */
-struct local_irq_lock {
- spinlock_t lock;
- struct task_struct *owner;
- int nestcnt;
- unsigned long flags;
-};
-
-#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
- DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
- .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
-
-#define DECLARE_LOCAL_IRQ_LOCK(lvar) \
- DECLARE_PER_CPU(struct local_irq_lock, lvar)
-
-#define local_irq_lock_init(lvar) \
- do { \
- int __cpu; \
- for_each_possible_cpu(__cpu) \
- spin_lock_init(&per_cpu(lvar, __cpu).lock); \
- } while (0)
-
-/*
- * spin_lock|trylock|unlock_local flavour that does not migrate disable
- * used for __local_lock|trylock|unlock where get_local_var/put_local_var
- * already takes care of the migrate_disable/enable
- * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
- */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define spin_lock_local(lock) rt_spin_lock(lock)
-# define spin_trylock_local(lock) rt_spin_trylock(lock)
-# define spin_unlock_local(lock) rt_spin_unlock(lock)
-#else
-# define spin_lock_local(lock) spin_lock(lock)
-# define spin_trylock_local(lock) spin_trylock(lock)
-# define spin_unlock_local(lock) spin_unlock(lock)
-#endif
-
-static inline void __local_lock(struct local_irq_lock *lv)
-{
- if (lv->owner != current) {
- spin_lock_local(&lv->lock);
- LL_WARN(lv->owner);
- LL_WARN(lv->nestcnt);
- lv->owner = current;
- }
- lv->nestcnt++;
-}
-
-#define local_lock(lvar) \
- do { __local_lock(&get_local_var(lvar)); } while (0)
-
-static inline int __local_trylock(struct local_irq_lock *lv)
-{
- if (lv->owner != current && spin_trylock_local(&lv->lock)) {
- LL_WARN(lv->owner);
- LL_WARN(lv->nestcnt);
- lv->owner = current;
- lv->nestcnt = 1;
- return 1;
- }
- return 0;
-}
-
-#define local_trylock(lvar) \
- ({ \
- int __locked; \
- __locked = __local_trylock(&get_local_var(lvar)); \
- if (!__locked) \
- put_local_var(lvar); \
- __locked; \
- })
-
-static inline void __local_unlock(struct local_irq_lock *lv)
-{
- LL_WARN(lv->nestcnt == 0);
- LL_WARN(lv->owner != current);
- if (--lv->nestcnt)
- return;
-
- lv->owner = NULL;
- spin_unlock_local(&lv->lock);
-}
-
-#define local_unlock(lvar) \
- do { \
- __local_unlock(&__get_cpu_var(lvar)); \
- put_local_var(lvar); \
- } while (0)
-
-static inline void __local_lock_irq(struct local_irq_lock *lv)
-{
- spin_lock_irqsave(&lv->lock, lv->flags);
- LL_WARN(lv->owner);
- LL_WARN(lv->nestcnt);
- lv->owner = current;
- lv->nestcnt = 1;
-}
-
-#define local_lock_irq(lvar) \
- do { __local_lock_irq(&get_local_var(lvar)); } while (0)
-
-#define local_lock_irq_on(lvar, cpu) \
- do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
-
-static inline void __local_unlock_irq(struct local_irq_lock *lv)
-{
- LL_WARN(!lv->nestcnt);
- LL_WARN(lv->owner != current);
- lv->owner = NULL;
- lv->nestcnt = 0;
- spin_unlock_irq(&lv->lock);
-}
-
-#define local_unlock_irq(lvar) \
- do { \
- __local_unlock_irq(&__get_cpu_var(lvar)); \
- put_local_var(lvar); \
- } while (0)
-
-#define local_unlock_irq_on(lvar, cpu) \
- do { \
- __local_unlock_irq(&per_cpu(lvar, cpu)); \
- } while (0)
-
-static inline int __local_lock_irqsave(struct local_irq_lock *lv)
-{
- if (lv->owner != current) {
- __local_lock_irq(lv);
- return 0;
- } else {
- lv->nestcnt++;
- return 1;
- }
-}
-
-#define local_lock_irqsave(lvar, _flags) \
- do { \
- if (__local_lock_irqsave(&get_local_var(lvar))) \
- put_local_var(lvar); \
- _flags = __get_cpu_var(lvar).flags; \
- } while (0)
-
-#define local_lock_irqsave_on(lvar, _flags, cpu) \
- do { \
- __local_lock_irqsave(&per_cpu(lvar, cpu)); \
- _flags = per_cpu(lvar, cpu).flags; \
- } while (0)
-
-static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
- unsigned long flags)
-{
- LL_WARN(!lv->nestcnt);
- LL_WARN(lv->owner != current);
- if (--lv->nestcnt)
- return 0;
-
- lv->owner = NULL;
- spin_unlock_irqrestore(&lv->lock, lv->flags);
- return 1;
-}
-
-#define local_unlock_irqrestore(lvar, flags) \
- do { \
- if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \
- put_local_var(lvar); \
- } while (0)
-
-#define local_unlock_irqrestore_on(lvar, flags, cpu) \
- do { \
- __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
- } while (0)
-
-#define local_spin_trylock_irq(lvar, lock) \
- ({ \
- int __locked; \
- local_lock_irq(lvar); \
- __locked = spin_trylock(lock); \
- if (!__locked) \
- local_unlock_irq(lvar); \
- __locked; \
- })
-
-#define local_spin_lock_irq(lvar, lock) \
- do { \
- local_lock_irq(lvar); \
- spin_lock(lock); \
- } while (0)
-
-#define local_spin_unlock_irq(lvar, lock) \
- do { \
- spin_unlock(lock); \
- local_unlock_irq(lvar); \
- } while (0)
-
-#define local_spin_lock_irqsave(lvar, lock, flags) \
- do { \
- local_lock_irqsave(lvar, flags); \
- spin_lock(lock); \
- } while (0)
-
-#define local_spin_unlock_irqrestore(lvar, lock, flags) \
- do { \
- spin_unlock(lock); \
- local_unlock_irqrestore(lvar, flags); \
- } while (0)
-
-#define get_locked_var(lvar, var) \
- (*({ \
- local_lock(lvar); \
- &__get_cpu_var(var); \
- }))
-
-#define put_locked_var(lvar, var) local_unlock(lvar);
-
-#define local_lock_cpu(lvar) \
- ({ \
- local_lock(lvar); \
- smp_processor_id(); \
- })
-
-#define local_unlock_cpu(lvar) local_unlock(lvar)
-
-#else /* PREEMPT_RT_BASE */
-
-#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
-#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar
-
-static inline void local_irq_lock_init(int lvar) { }
-
-#define local_lock(lvar) preempt_disable()
-#define local_unlock(lvar) preempt_enable()
-#define local_lock_irq(lvar) local_irq_disable()
-#define local_unlock_irq(lvar) local_irq_enable()
-#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
-#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
-
-#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock)
-#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock)
-#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock)
-#define local_spin_lock_irqsave(lvar, lock, flags) \
- spin_lock_irqsave(lock, flags)
-#define local_spin_unlock_irqrestore(lvar, lock, flags) \
- spin_unlock_irqrestore(lock, flags)
-
-#define get_locked_var(lvar, var) get_cpu_var(var)
-#define put_locked_var(lvar, var) put_cpu_var(var)
-
-#define local_lock_cpu(lvar) get_cpu()
-#define local_unlock_cpu(lvar) put_cpu()
-
-#endif
-
-#endif
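
For reference, the header deleted above implemented per-CPU "local locks" as the RT substitute for local_irq_save()/restore() sections; on non-RT builds the same macros collapsed to preempt/irq toggles. A sketch of how a call site used the removed API (demo_lock and demo_stats are hypothetical names):

static DEFINE_LOCAL_IRQ_LOCK(demo_lock);
static DEFINE_PER_CPU(unsigned long, demo_stats);

static void demo_account(void)
{
	unsigned long flags;

	/* per-CPU spinlock on RT; plain local_irq_save() otherwise */
	local_lock_irqsave(demo_lock, flags);
	__get_cpu_var(demo_stats)++;
	local_unlock_irqrestore(demo_lock, flags);
}
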
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c7ebd78..0737343 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -371,8 +371,18 @@ static inline void compound_unlock_irqrestore(struct page *page,
static inline struct page *compound_head(struct page *page)
{
- if (unlikely(PageTail(page)))
- return page->first_page;
+ if (unlikely(PageTail(page))) {
+ struct page *head = page->first_page;
+
+ /*
+ * page->first_page may be a dangling pointer to an old
+ * compound page, so recheck that it is still a tail
+ * page before returning.
+ */
+ smp_rmb();
+ if (likely(PageTail(page)))
+ return head;
+ }
return page;
}
@@ -1242,59 +1252,27 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
* overflow into the next struct page (as it might with DEBUG_SPINLOCK).
* When freeing, reset page->mapping so free_pages_check won't complain.
*/
-#ifndef CONFIG_PREEMPT_RT_FULL
-
#define __pte_lockptr(page) &((page)->ptl)
-
-static inline struct page *pte_lock_init(struct page *page)
-{
- spin_lock_init(__pte_lockptr(page));
- return page;
-}
-
+#define pte_lock_init(_page) do { \
+ spin_lock_init(__pte_lockptr(_page)); \
+} while (0)
#define pte_lock_deinit(page) ((page)->mapping = NULL)
-
-#else /* !PREEMPT_RT_FULL */
-
-/*
- * On PREEMPT_RT_FULL the spinlock_t's are too large to embed in the
- * page frame, hence it only has a pointer and we need to dynamically
- * allocate the lock when we allocate PTE-pages.
- *
- * This is an overall win, since only a small fraction of the pages
- * will be PTE pages under normal circumstances.
- */
-
-#define __pte_lockptr(page) ((page)->ptl)
-
-extern struct page *pte_lock_init(struct page *page);
-extern void pte_lock_deinit(struct page *page);
-
-#endif /* PREEMPT_RT_FULL */
-
#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else /* !USE_SPLIT_PTLOCKS */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
-static inline struct page *pte_lock_init(struct page *page) { return page; }
+#define pte_lock_init(page) do {} while (0)
#define pte_lock_deinit(page) do {} while (0)
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */
-static inline struct page *__pgtable_page_ctor(struct page *page)
+static inline void pgtable_page_ctor(struct page *page)
{
- page = pte_lock_init(page);
- if (page)
- inc_zone_page_state(page, NR_PAGETABLE);
- return page;
+ pte_lock_init(page);
+ inc_zone_page_state(page, NR_PAGETABLE);
}
-#define pgtable_page_ctor(page) \
-do { \
- page = __pgtable_page_ctor(page); \
-} while (0)
-
static inline void pgtable_page_dtor(struct page *page)
{
pte_lock_deinit(page);
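
After this hunk pgtable_page_ctor() returns void again, so arch pte-allocation paths need no failure check for it. A hypothetical caller, assuming an architecture where pgtable_t is struct page *:

static pgtable_t demo_pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	pgtable_page_ctor(page);	/* init split PTE lock, account NR_PAGETABLE */
	return page;
}
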
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d87823c..8e082f1 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -11,7 +11,6 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
-#include <linux/rcupdate.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
@@ -143,11 +142,7 @@ struct page {
* system if PG_buddy is set.
*/
#if USE_SPLIT_PTLOCKS
-# ifndef CONFIG_PREEMPT_RT_FULL
spinlock_t ptl;
-# else
- spinlock_t *ptl;
-# endif
#endif
struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
struct page *first_page; /* Compound tail pages */
@@ -449,9 +444,6 @@ struct mm_struct {
bool tlb_flush_pending;
#endif
struct uprobes_state uprobes_state;
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct rcu_head delayed_drop;
-#endif
};
/* first nid will either be a valid NID or one of these values */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 79e172a..bab49da 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -18,17 +18,6 @@
#include <linux/atomic.h>
#include <asm/processor.h>
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-# include <linux/mutex_rt.h>
-#else
-
/*
* Simple, straightforward mutexes with strict semantics:
*
@@ -110,6 +99,13 @@ do { \
static inline void mutex_destroy(struct mutex *lock) {}
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -177,8 +173,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
-#endif /* !PREEMPT_RT_FULL */
-
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#ifndef arch_mutex_cpu_relax
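
The mutex.h hunks only move __DEP_MAP_MUTEX_INITIALIZER back next to __MUTEX_INITIALIZER and drop the RT indirection; the API itself is unchanged. A minimal sketch (demo_lock is a hypothetical name):

static DEFINE_MUTEX(demo_lock);	/* includes .dep_map under CONFIG_DEBUG_LOCK_ALLOC */

static int demo_update(void)
{
	if (!mutex_trylock(&demo_lock))
		return -EBUSY;
	/* ... exclusive section ... */
	mutex_unlock(&demo_lock);
	return 0;
}
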
diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
deleted file mode 100644
index c38a44b..0000000
--- a/include/linux/mutex_rt.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef __LINUX_MUTEX_RT_H
-#define __LINUX_MUTEX_RT_H
-
-#ifndef __LINUX_MUTEX_H
-#error "Please include mutex.h"
-#endif
-
-#include <linux/rtmutex.h>
-
-/* FIXME: Just for __lockfunc */
-#include <linux/spinlock.h>
-
-struct mutex {
- struct rt_mutex lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#define __MUTEX_INITIALIZER(mutexname) \
- { \
- .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
- __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
- }
-
-#define DEFINE_MUTEX(mutexname) \
- struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
-
-extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
-extern void __lockfunc _mutex_lock(struct mutex *lock);
-extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
-extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
-extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
-extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
-extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
-extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
-extern int __lockfunc _mutex_trylock(struct mutex *lock);
-extern void __lockfunc _mutex_unlock(struct mutex *lock);
-
-#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
-#define mutex_lock(l) _mutex_lock(l)
-#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
-#define mutex_lock_killable(l) _mutex_lock_killable(l)
-#define mutex_trylock(l) _mutex_trylock(l)
-#define mutex_unlock(l) _mutex_unlock(l)
-#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
-# define mutex_lock_interruptible_nested(l, s) \
- _mutex_lock_interruptible_nested(l, s)
-# define mutex_lock_killable_nested(l, s) \
- _mutex_lock_killable_nested(l, s)
-
-# define mutex_lock_nest_lock(lock, nest_lock) \
-do { \
- typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
- _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
-} while (0)
-
-#else
-# define mutex_lock_nested(l, s) _mutex_lock(l)
-# define mutex_lock_interruptible_nested(l, s) \
- _mutex_lock_interruptible(l)
-# define mutex_lock_killable_nested(l, s) \
- _mutex_lock_killable(l)
-# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-#endif
-
-# define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- rt_mutex_init(&(mutex)->lock); \
- __mutex_do_init((mutex), #mutex, &__key); \
-} while (0)
-
-# define __mutex_init(mutex, name, key) \
-do { \
- rt_mutex_init(&(mutex)->lock); \
- __mutex_do_init((mutex), name, key); \
-} while (0)
-
-#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1f48a4e..9f2a0cb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1214,7 +1214,7 @@ struct net_device {
unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
unsigned char addr_assign_type; /* hw address assignment type */
unsigned char addr_len; /* hardware address length */
- unsigned short neigh_priv_len;
+ unsigned char neigh_priv_len;
unsigned short dev_id; /* Used to differentiate devices
* that share the same link
* layer address
@@ -1930,7 +1930,6 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
- struct sk_buff_head tofree_queue;
#ifdef CONFIG_NET_FLOW_LIMIT
struct sd_flow_limit __rcu *flow_limit;
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 7d083af..dd49566 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -3,7 +3,6 @@
#include <linux/netdevice.h>
-#include <linux/locallock.h>
#include <uapi/linux/netfilter/x_tables.h>
/**
@@ -285,8 +284,6 @@ extern void xt_free_table_info(struct xt_table_info *info);
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
-DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
-
/**
* xt_write_recseq_begin - start of a write section
*
@@ -301,9 +298,6 @@ static inline unsigned int xt_write_recseq_begin(void)
{
unsigned int addend;
- /* RT protection */
- local_lock(xt_write_lock);
-
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
@@ -334,7 +328,6 @@ static inline void xt_write_recseq_end(unsigned int addend)
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
- local_unlock(xt_write_lock);
}
/*
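
With the local lock gone, xt_write_recseq_begin()/xt_write_recseq_end() is again a bare per-CPU seqcount bump, which is safe only because callers run with bottom halves disabled. The writer-side pattern looks like this:

unsigned int addend;

local_bh_disable();
addend = xt_write_recseq_begin();	/* sequence becomes odd unless nested */
/* ... update per-CPU rule counters ... */
xt_write_recseq_end(addend);		/* smp_wmb(), then sequence += addend */
local_bh_enable();
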
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 3ea4cde..a632498 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -215,6 +215,7 @@ struct nfs_inode {
#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */
#define NFS_INO_STALE (1) /* possible stale inode */
#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
+#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
#define NFS_INO_FLUSHING (4) /* inode is flushing out data */
#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index ea9e076..2b30701 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -467,9 +467,14 @@ struct nfs_lockt_res {
};
struct nfs_release_lockowner_args {
+ struct nfs4_sequence_args seq_args;
struct nfs_lowner lock_owner;
};
+struct nfs_release_lockowner_res {
+ struct nfs4_sequence_res seq_res;
+};
+
struct nfs4_delegreturnargs {
struct nfs4_sequence_args seq_args;
const struct nfs_fh *fhandle;
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 2e4414a..d14a4c3 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -6,7 +6,7 @@
*
* Alan Cox <Alan.Cox@linux.org>
*/
-
+
#ifndef _LINUX_NOTIFIER_H
#define _LINUX_NOTIFIER_H
#include <linux/errno.h>
@@ -42,7 +42,9 @@
* in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
* As compensation, srcu_notifier_chain_unregister() is rather expensive.
* SRCU notifier chains should be used when the chain will be called very
- * often but notifier_blocks will seldom be removed.
+ * often but notifier_blocks will seldom be removed. Also, SRCU notifier
+ * chains are slightly more difficult to use because they require special
+ * runtime initialization.
*/
typedef int (*notifier_fn_t)(struct notifier_block *nb,
@@ -86,7 +88,7 @@ struct srcu_notifier_head {
(name)->head = NULL; \
} while (0)
-/* srcu_notifier_heads must be cleaned up dynamically */
+/* srcu_notifier_heads must be initialized and cleaned up dynamically */
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
@@ -99,13 +101,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
-
-#define SRCU_NOTIFIER_INIT(name, pcpu) \
- { \
- .mutex = __MUTEX_INITIALIZER(name.mutex), \
- .head = NULL, \
- .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
- }
+/* srcu_notifier_heads cannot be initialized statically */
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
@@ -117,18 +113,6 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
-#define _SRCU_NOTIFIER_HEAD(name, mod) \
- static DEFINE_PER_CPU(struct srcu_struct_array, \
- name##_head_srcu_array); \
- mod struct srcu_notifier_head name = \
- SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
-
-#define SRCU_NOTIFIER_HEAD(name) \
- _SRCU_NOTIFIER_HEAD(name, )
-
-#define SRCU_NOTIFIER_HEAD_STATIC(name) \
- _SRCU_NOTIFIER_HEAD(name, static)
-
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
@@ -198,12 +182,12 @@ static inline int notifier_to_errno(int ret)
/*
* Declared notifiers so far. I can imagine quite a few more chains
- * over time (eg laptop power reset chains, reboot chain (to clean
+ * over time (eg laptop power reset chains, reboot chain (to clean
* device units up), device [un]mount chain, module load/unload chain,
- * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
* VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
*/
-
+
/* CPU notifiers are defined in include/linux/cpu.h. */
/* netdevice notifiers are defined in include/linux/netdevice.h */
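
Reverting the static SRCU initializers means every srcu_notifier_head must again be set up at runtime before use, as the restored comments state. A sketch of the required sequence (demo_* names are hypothetical):

static struct srcu_notifier_head demo_chain;

static int demo_event(struct notifier_block *nb, unsigned long action,
		      void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = { .notifier_call = demo_event };

static void demo_use(void)
{
	srcu_init_notifier_head(&demo_chain);	/* mandatory runtime init */
	srcu_notifier_chain_register(&demo_chain, &demo_nb);
	srcu_notifier_call_chain(&demo_chain, 0, NULL);
	srcu_notifier_chain_unregister(&demo_chain, &demo_nb);
	srcu_cleanup_notifier_head(&demo_chain);
}
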
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index ca67e80..777a524 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -24,9 +24,6 @@ enum {
*/
struct page_cgroup {
unsigned long flags;
-#ifdef CONFIG_PREEMPT_RT_BASE
- spinlock_t pcg_lock;
-#endif
struct mem_cgroup *mem_cgroup;
};
@@ -77,20 +74,12 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
* Don't take this lock in IRQ context.
* This lock is for pc->mem_cgroup, USED, MIGRATION
*/
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(PCG_LOCK, &pc->flags);
-#else
- spin_lock(&pc->pcg_lock);
-#endif
}
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(PCG_LOCK, &pc->flags);
-#else
- spin_unlock(&pc->pcg_lock);
-#endif
}
#else /* CONFIG_MEMCG */
@@ -113,10 +102,6 @@ static inline void __init page_cgroup_init_flatmem(void)
{
}
-static inline void page_cgroup_lock_init(struct page_cgroup *pc)
-{
-}
-
#endif /* CONFIG_MEMCG */
#include <linux/swap.h>
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index f05adf5..c74088a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,31 +48,6 @@
preempt_enable(); \
} while (0)
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define get_local_var(var) get_cpu_var(var)
-# define put_local_var(var) put_cpu_var(var)
-# define get_local_ptr(var) get_cpu_ptr(var)
-# define put_local_ptr(var) put_cpu_ptr(var)
-#else
-# define get_local_var(var) (*({ \
- migrate_disable(); \
- &__get_cpu_var(var); }))
-
-# define put_local_var(var) do { \
- (void)&(var); \
- migrate_enable(); \
-} while (0)
-
-# define get_local_ptr(var) ({ \
- migrate_disable(); \
- this_cpu_ptr(var); })
-
-# define put_local_ptr(var) do { \
- (void)(var); \
- migrate_enable(); \
-} while (0)
-#endif
-
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
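
The deleted get_local_var()/put_local_var() pair fell back to get_cpu_var()/put_cpu_var() on non-RT kernels, which is what call sites use after the revert (demo_counter is a hypothetical name):

static DEFINE_PER_CPU(int, demo_counter);

static void demo_inc(void)
{
	int *p = &get_cpu_var(demo_counter);	/* disables preemption */
	(*p)++;
	put_cpu_var(demo_counter);		/* re-enables preemption */
}
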
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 2cc64b7..23705a5 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -2,7 +2,6 @@
#define _LINUX_PID_H
#include <linux/rcupdate.h>
-#include <linux/atomic.h>
enum pid_type
{
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index c153cf2..f5d4723 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -23,38 +23,15 @@
#define preempt_count() (current_thread_info()->preempt_count)
-#ifdef CONFIG_PREEMPT_LAZY
-#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
-#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
-#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
-#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
-#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
-#else
-#define add_preempt_lazy_count(val) do { } while (0)
-#define sub_preempt_lazy_count(val) do { } while (0)
-#define inc_preempt_lazy_count() do { } while (0)
-#define dec_preempt_lazy_count() do { } while (0)
-#define preempt_lazy_count() (0)
-#endif
-
#ifdef CONFIG_PREEMPT
asmlinkage void preempt_schedule(void);
-# ifdef CONFIG_PREEMPT_LAZY
#define preempt_check_resched() \
do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || \
- test_thread_flag(TIF_NEED_RESCHED_LAZY))) \
- preempt_schedule(); \
-} while (0)
-# else
-#define preempt_check_resched() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
preempt_schedule(); \
} while (0)
-# endif
#ifdef CONFIG_CONTEXT_TRACKING
@@ -87,36 +64,17 @@ do { \
barrier(); \
} while (0)
-#define preempt_lazy_disable() \
-do { \
- inc_preempt_lazy_count(); \
- barrier(); \
-} while (0)
-
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
dec_preempt_count(); \
} while (0)
-#ifndef CONFIG_PREEMPT_RT_BASE
-# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-# define preempt_check_resched_rt() barrier()
-#else
-# define preempt_enable_no_resched() preempt_enable()
-# define preempt_check_resched_rt() preempt_check_resched()
-#endif
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
#define preempt_enable() \
do { \
- sched_preempt_enable_no_resched(); \
- barrier(); \
- preempt_check_resched(); \
-} while (0)
-
-#define preempt_lazy_enable() \
-do { \
- dec_preempt_lazy_count(); \
+ preempt_enable_no_resched(); \
barrier(); \
preempt_check_resched(); \
} while (0)
@@ -165,31 +123,9 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
-#define preempt_check_resched_rt() barrier()
#endif /* CONFIG_PREEMPT_COUNT */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define preempt_disable_rt() preempt_disable()
-# define preempt_enable_rt() preempt_enable()
-# define preempt_disable_nort() barrier()
-# define preempt_enable_nort() barrier()
-# ifdef CONFIG_SMP
- extern void migrate_disable(void);
- extern void migrate_enable(void);
-# else /* CONFIG_SMP */
-# define migrate_disable() barrier()
-# define migrate_enable() barrier()
-# endif /* CONFIG_SMP */
-#else
-# define preempt_disable_rt() barrier()
-# define preempt_enable_rt() barrier()
-# define preempt_disable_nort() preempt_disable()
-# define preempt_enable_nort() preempt_enable()
-# define migrate_disable() preempt_disable()
-# define migrate_enable() preempt_enable()
-#endif
-
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
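
After the revert, preempt_enable() unconditionally pairs the count decrement with preempt_check_resched(), so a pending TIF_NEED_RESCHED is acted on when the outermost section ends. A sketch of the contract:

preempt_disable();	/* inc_preempt_count() + barrier() */
/* ... per-CPU state is safe to touch here ... */
preempt_enable();	/* dec count; calls preempt_schedule() if resched is pending */
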
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
index 199f278..931bc61 100644
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -58,11 +58,7 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-#else
-# define SOFTIRQ_DISABLE_OFFSET (0)
-#endif
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS 1
@@ -75,15 +71,9 @@
#endif
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#else
-# define softirq_count() (0UL)
-extern int in_serving_softirq(void);
-#endif
/*
* Are we doing bottom half or hardware interrupt processing?
@@ -94,6 +84,7 @@ extern int in_serving_softirq(void);
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
/*
* Are we in NMI context?
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 6add55e..6949258 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -101,11 +101,9 @@ int no_printk(const char *fmt, ...)
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
void early_vprintk(const char *fmt, va_list ap);
-extern void printk_kill(void);
#else
static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
-static inline void printk_kill(void) { }
#endif
#ifdef CONFIG_PRINTK
@@ -139,6 +137,7 @@ extern int __printk_ratelimit(const char *func);
#define printk_ratelimit() __printk_ratelimit(__func__)
extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);
+
extern int printk_delay_msec;
extern int dmesg_restrict;
extern int kptr_restrict;
@@ -234,6 +233,8 @@ extern asmlinkage void dump_stack(void) __cold;
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
+#include <linux/dynamic_debug.h>
+
/* If you are writing a driver, please use dev_dbg instead */
#if defined(CONFIG_DYNAMIC_DEBUG)
/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
@@ -344,7 +345,19 @@ extern asmlinkage void dump_stack(void) __cold;
#endif
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define pr_debug_ratelimited(fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+ __ratelimit(&_rs)) \
+ __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
#define pr_debug_ratelimited(fmt, ...) \
printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
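
The new dynamic-debug branch of pr_debug_ratelimited() keeps a static ratelimit state per call site and tests the descriptor before the ratelimit, so disabled sites never emit "callbacks suppressed" noise. Usage is unchanged (qid and dropped are hypothetical variables):

/* at most DEFAULT_RATELIMIT_BURST prints per DEFAULT_RATELIMIT_INTERVAL,
 * and only when this call site is enabled via dynamic debug */
pr_debug_ratelimited("queue %d overrun, dropped %u packets\n", qid, dropped);
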
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 5b6d5b2..4039407 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -230,13 +230,8 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
-#ifndef CONFIG_PREEMPT_RT_FULL
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
-#else
-static inline int radix_tree_preload(gfp_t gm) { return 0; }
-static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; }
-#endif
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
@@ -261,7 +256,7 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
static inline void radix_tree_preload_end(void)
{
- preempt_enable_nort();
+ preempt_enable();
}
/**
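
With the RT stubs removed, radix_tree_preload() genuinely preallocates nodes and leaves preemption disabled until radix_tree_preload_end(). The canonical insertion pattern (demo_tree and demo_lock are hypothetical):

static RADIX_TREE(demo_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(demo_lock);

static int demo_insert(unsigned long index, void *item)
{
	int err;

	if (radix_tree_preload(GFP_KERNEL))	/* may sleep; disables preemption on success */
		return -ENOMEM;
	spin_lock(&demo_lock);
	err = radix_tree_insert(&demo_tree, index, item);	/* -EEXIST possible, not -ENOMEM */
	spin_unlock(&demo_lock);
	radix_tree_preload_end();		/* preempt_enable(), per the hunk above */
	return err;
}
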
diff --git a/include/linux/random.h b/include/linux/random.h
index de4894a..bf9085e 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -12,7 +12,7 @@
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
+extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 8b2693d..f1f1bc3 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -128,9 +128,6 @@ extern void call_rcu(struct rcu_head *head,
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_PREEMPT_RT_FULL
-#define call_rcu_bh call_rcu
-#else
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
@@ -154,7 +151,6 @@ extern void call_rcu(struct rcu_head *head,
*/
extern void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
-#endif
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -194,11 +190,6 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
-#ifndef CONFIG_PREEMPT_RT_FULL
-#define sched_rcu_preempt_depth() rcu_preempt_depth()
-#else
-static inline int sched_rcu_preempt_depth(void) { return 0; }
-#endif
#else /* #ifdef CONFIG_PREEMPT_RCU */
@@ -222,8 +213,6 @@ static inline int rcu_preempt_depth(void)
return 0;
}
-#define sched_rcu_preempt_depth() rcu_preempt_depth()
-
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
@@ -373,14 +362,7 @@ static inline int rcu_read_lock_held(void)
* rcu_read_lock_bh_held() is defined out of line to avoid #include-file
* hell.
*/
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline int rcu_read_lock_bh_held(void)
-{
- return rcu_read_lock_held();
-}
-#else
extern int rcu_read_lock_bh_held(void);
-#endif
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
@@ -837,14 +819,10 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
-#ifdef CONFIG_PREEMPT_RT_FULL
- rcu_read_lock();
-#else
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
rcu_lockdep_assert(!rcu_is_cpu_idle(),
"rcu_read_lock_bh() used illegally while idle");
-#endif
}
/*
@@ -854,14 +832,10 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
-#ifdef CONFIG_PREEMPT_RT_FULL
- rcu_read_unlock();
-#else
rcu_lockdep_assert(!rcu_is_cpu_idle(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
-#endif
local_bh_enable();
}
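
rcu_read_lock_bh() is again local_bh_disable() plus the lockdep/sparse annotations, with no fallback to an RCU-preempt read section. A reader-side sketch (demo_ptr is a hypothetical RCU-protected pointer, demo_use a hypothetical consumer):

struct demo *p;

rcu_read_lock_bh();
p = rcu_dereference_bh(demo_ptr);	/* pairs with rcu_assign_pointer() on update */
if (p)
	demo_use(p);
rcu_read_unlock_bh();			/* ends the section via local_bh_enable() */
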
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 91333de..226169d 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -44,11 +44,7 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(cpu);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define synchronize_rcu_bh synchronize_rcu
-#else
extern void synchronize_rcu_bh(void);
-#endif
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);
@@ -76,19 +72,17 @@ static inline void synchronize_rcu_bh_expedited(void)
}
extern void rcu_barrier(void);
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define rcu_barrier_bh rcu_barrier
-#else
extern void rcu_barrier_bh(void);
-#endif
extern void rcu_barrier_sched(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
extern long rcu_batches_completed_sched(void);
extern void rcu_force_quiescent_state(void);
+extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
extern void exit_rcu(void);
@@ -96,12 +90,4 @@ extern void exit_rcu(void);
extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
-#ifndef CONFIG_PREEMPT_RT_FULL
-extern void rcu_bh_force_quiescent_state(void);
-extern long rcu_batches_completed_bh(void);
-#else
-# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
-# define rcu_batches_completed_bh rcu_batches_completed
-#endif
-
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index fa18682..de17134 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -14,14 +14,10 @@
#include <linux/linkage.h>
#include <linux/plist.h>
-#include <linux/spinlock_types_raw.h>
+#include <linux/spinlock_types.h>
extern int max_lock_depth; /* for sysctl */
-#ifdef CONFIG_DEBUG_MUTEXES
-#include <linux/debug_locks.h>
-#endif
-
/**
* The rt_mutex structure
*
@@ -33,10 +29,9 @@ struct rt_mutex {
raw_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
- int save_state;
#ifdef CONFIG_DEBUG_RT_MUTEXES
- const char *file;
- const char *name;
+ int save_state;
+ const char *name, *file;
int line;
void *magic;
#endif
@@ -61,39 +56,19 @@ struct hrtimer_sleeper;
#ifdef CONFIG_DEBUG_RT_MUTEXES
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
, .name = #mutexname, .file = __FILE__, .line = __LINE__
-
-# define rt_mutex_init(mutex) \
- do { \
- raw_spin_lock_init(&(mutex)->wait_lock); \
- __rt_mutex_init(mutex, #mutex); \
- } while (0)
-
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-
-# define rt_mutex_init(mutex) \
- do { \
- raw_spin_lock_init(&(mutex)->wait_lock); \
- __rt_mutex_init(mutex, #mutex); \
- } while (0)
-
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
# define rt_mutex_debug_task_free(t) do { } while (0)
#endif
-#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
, .owner = NULL \
- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-
-
-#define __RT_MUTEX_INITIALIZER(mutexname) \
- { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
-
-#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
- { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
- , .save_state = 1 }
+ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
@@ -115,7 +90,6 @@ extern void rt_mutex_destroy(struct rt_mutex *lock);
extern void rt_mutex_lock(struct rt_mutex *lock);
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
int detect_deadlock);
-extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout,
int detect_deadlock);
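
rt_mutex_init() collapses back to a single __rt_mutex_init() call (the wait_lock is set up inside), and the _SAVE_STATE initializer variant goes away together with the non-debug save_state field. Basic usage is unaffected (demo_rtm is a hypothetical name):

static DEFINE_RT_MUTEX(demo_rtm);	/* static __RT_MUTEX_INITIALIZER */

rt_mutex_lock(&demo_rtm);		/* priority-inheriting acquisition */
/* ... */
rt_mutex_unlock(&demo_rtm);
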
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
deleted file mode 100644
index 49ed2d4..0000000
--- a/include/linux/rwlock_rt.h
+++ /dev/null
@@ -1,99 +0,0 @@
-#ifndef __LINUX_RWLOCK_RT_H
-#define __LINUX_RWLOCK_RT_H
-
-#ifndef __LINUX_SPINLOCK_H
-#error Do not include directly. Use spinlock.h
-#endif
-
-#define rwlock_init(rwl) \
-do { \
- static struct lock_class_key __key; \
- \
- rt_mutex_init(&(rwl)->lock); \
- __rt_rwlock_init(rwl, #rwl, &__key); \
-} while (0)
-
-extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
-extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
-extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
-extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
-extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
-extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
-extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
-extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
-extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
-extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
-
-#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
-
-#define write_trylock_irqsave(lock, flags) \
- __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
-
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = rt_read_lock_irqsave(lock); \
- } while (0)
-
-#define write_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = rt_write_lock_irqsave(lock); \
- } while (0)
-
-#define read_lock(lock) rt_read_lock(lock)
-
-#define read_lock_bh(lock) \
- do { \
- local_bh_disable(); \
- rt_read_lock(lock); \
- } while (0)
-
-#define read_lock_irq(lock) read_lock(lock)
-
-#define write_lock(lock) rt_write_lock(lock)
-
-#define write_lock_bh(lock) \
- do { \
- local_bh_disable(); \
- rt_write_lock(lock); \
- } while (0)
-
-#define write_lock_irq(lock) write_lock(lock)
-
-#define read_unlock(lock) rt_read_unlock(lock)
-
-#define read_unlock_bh(lock) \
- do { \
- rt_read_unlock(lock); \
- local_bh_enable(); \
- } while (0)
-
-#define read_unlock_irq(lock) read_unlock(lock)
-
-#define write_unlock(lock) rt_write_unlock(lock)
-
-#define write_unlock_bh(lock) \
- do { \
- rt_write_unlock(lock); \
- local_bh_enable(); \
- } while (0)
-
-#define write_unlock_irq(lock) write_unlock(lock)
-
-#define read_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- (void) flags; \
- rt_read_unlock(lock); \
- } while (0)
-
-#define write_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- (void) flags; \
- rt_write_unlock(lock); \
- } while (0)
-
-#endif
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index d0da966..cc0072e 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -1,10 +1,6 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
-#if !defined(__LINUX_SPINLOCK_TYPES_H)
-# error "Do not include directly, include spinlock_types.h"
-#endif
-
/*
* include/linux/rwlock_types.h - generic rwlock type definitions
* and initializers
@@ -47,7 +43,6 @@ typedef struct {
RW_DEP_MAP_INIT(lockname) }
#endif
-#define DEFINE_RWLOCK(name) \
- rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
deleted file mode 100644
index b138321..0000000
--- a/include/linux/rwlock_types_rt.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __LINUX_RWLOCK_TYPES_RT_H
-#define __LINUX_RWLOCK_TYPES_RT_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-#error "Do not include directly. Include spinlock_types.h instead"
-#endif
-
-/*
- * rwlocks - rtmutex which allows single reader recursion
- */
-typedef struct {
- struct rt_mutex lock;
- int read_depth;
- unsigned int break_lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RW_LOCK_UNLOCKED(name) \
- { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
- RW_DEP_MAP_INIT(name) }
-
-#define DEFINE_RWLOCK(name) \
- rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
-
-#endif
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 0ad6070..0616ffe 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -16,10 +16,6 @@
#include <linux/atomic.h>
-#ifdef CONFIG_PREEMPT_RT_FULL
-#include <linux/rwsem_rt.h>
-#else /* PREEMPT_RT_FULL */
-
struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -153,6 +149,4 @@ extern void up_read_non_owner(struct rw_semaphore *sem);
# define up_read_non_owner(sem) up_read(sem)
#endif
-#endif /* !PREEMPT_RT_FULL */
-
#endif /* _LINUX_RWSEM_H */
diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
deleted file mode 100644
index e94d945..0000000
--- a/include/linux/rwsem_rt.h
+++ /dev/null
@@ -1,128 +0,0 @@
-#ifndef _LINUX_RWSEM_RT_H
-#define _LINUX_RWSEM_RT_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Include rwsem.h"
-#endif
-
-/*
- * RW-semaphores are a spinlock plus a reader-depth count.
- *
- * Note that the semantics are different from the usual
- * Linux rw-sems, in PREEMPT_RT mode we do not allow
- * multiple readers to hold the lock at once, we only allow
- * a read-lock owner to read-lock recursively. This is
- * better for latency, makes the implementation inherently
- * fair and makes it simpler as well.
- */
-
-#include <linux/rtmutex.h>
-
-struct rw_semaphore {
- struct rt_mutex lock;
- int read_depth;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#define __RWSEM_INITIALIZER(name) \
- { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
- RW_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(lockname) \
- struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
-
-extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
- struct lock_class_key *key);
-
-#define __rt_init_rwsem(sem, name, key) \
- do { \
- rt_mutex_init(&(sem)->lock); \
- __rt_rwsem_init((sem), (name), (key));\
- } while (0)
-
-#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
-
-# define rt_init_rwsem(sem) \
-do { \
- static struct lock_class_key __key; \
- \
- __rt_init_rwsem((sem), #sem, &__key); \
-} while (0)
-
-extern void rt_down_write(struct rw_semaphore *rwsem);
-extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
-extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
-extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
- struct lockdep_map *nest);
-extern void rt_down_read(struct rw_semaphore *rwsem);
-extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
-extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
-extern void rt_up_read(struct rw_semaphore *rwsem);
-extern void rt_up_write(struct rw_semaphore *rwsem);
-extern void rt_downgrade_write(struct rw_semaphore *rwsem);
-
-#define init_rwsem(sem) rt_init_rwsem(sem)
-#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
-
-static inline void down_read(struct rw_semaphore *sem)
-{
- rt_down_read(sem);
-}
-
-static inline int down_read_trylock(struct rw_semaphore *sem)
-{
- return rt_down_read_trylock(sem);
-}
-
-static inline void down_write(struct rw_semaphore *sem)
-{
- rt_down_write(sem);
-}
-
-static inline int down_write_trylock(struct rw_semaphore *sem)
-{
- return rt_down_write_trylock(sem);
-}
-
-static inline void up_read(struct rw_semaphore *sem)
-{
- rt_up_read(sem);
-}
-
-static inline void up_write(struct rw_semaphore *sem)
-{
- rt_up_write(sem);
-}
-
-static inline void downgrade_write(struct rw_semaphore *sem)
-{
- rt_downgrade_write(sem);
-}
-
-static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
-{
- return rt_down_read_nested(sem, subclass);
-}
-
-static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
-{
- rt_down_write_nested(sem, subclass);
-}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static inline void down_write_nest_lock(struct rw_semaphore *sem,
- struct rw_semaphore *nest_lock)
-{
- rt_down_write_nested_lock(sem, &nest_lock->dep_map);
-}
-
-#else
-
-static inline void down_write_nest_lock(struct rw_semaphore *sem,
- struct rw_semaphore *nest_lock)
-{
- rt_down_write_nested_lock(sem, NULL);
-}
-#endif
-#endif
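
The deleted rwsem_rt.h mapped the whole rw_semaphore API onto an rt_mutex plus a read_depth counter, so readers were exclusive but could recurse. Call sites are source-compatible either way (demo_sem is a hypothetical name):

static DECLARE_RWSEM(demo_sem);

down_read(&demo_sem);	/* shared on mainline; exclusive-but-recursive on RT */
/* ... */
up_read(&demo_sem);

down_write(&demo_sem);
/* ... */
up_write(&demo_sem);
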
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 625a41f..b1e963e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -23,7 +23,6 @@ struct sched_param {
#include <linux/nodemask.h>
#include <linux/mm_types.h>
-#include <asm/kmap_types.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>
@@ -53,7 +52,6 @@ struct sched_param {
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
-#include <linux/hardirq.h>
#include <asm/processor.h>
@@ -167,8 +165,11 @@ extern char ___assert_task_state[1 - 2*!!(
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED)
+#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task) ((task)->exit_state != 0)
+#define task_is_stopped_or_traced(task) \
+ ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0)
@@ -1021,7 +1022,6 @@ enum perf_event_task_context {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- volatile long saved_state; /* saved state for "spinlock sleepers" */
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
@@ -1064,12 +1064,6 @@ struct task_struct {
#endif
unsigned int policy;
-#ifdef CONFIG_PREEMPT_RT_FULL
- int migrate_disable;
-# ifdef CONFIG_SCHED_DEBUG
- int migrate_disable_atomic;
-# endif
-#endif
int nr_cpus_allowed;
cpumask_t cpus_allowed;
@@ -1165,8 +1159,7 @@ struct task_struct {
struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- raw_spinlock_t vtime_lock;
- seqcount_t vtime_seq;
+ seqlock_t vtime_seqlock;
unsigned long long vtime_snap;
enum {
VTIME_SLEEPING = 0,
@@ -1182,9 +1175,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct task_struct *posix_timer_list;
-#endif
/* process credentials */
const struct cred __rcu *real_cred; /* objective and real subjective task
@@ -1216,15 +1206,10 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
- struct sigqueue *sigqueue_cache;
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /* TODO: move me into ->restart_block ? */
- struct siginfo forced_info;
-#endif
unsigned long sas_ss_sp;
size_t sas_ss_size;
@@ -1261,9 +1246,6 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
- int pagefault_disabled;
-#endif
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
@@ -1339,9 +1321,6 @@ struct task_struct {
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
-#ifdef CONFIG_DEBUG_PREEMPT
- unsigned long preempt_disable_ip;
-#endif
#ifdef CONFIG_NUMA
struct mempolicy *mempolicy; /* Protected by alloc_lock */
short il_next;
@@ -1409,12 +1388,6 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- u64 preempt_timestamp_hist;
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- long timer_offset;
-#endif
-#endif
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
struct memcg_batch_info {
@@ -1438,19 +1411,11 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct rcu_head put_rcu;
- int softirq_nestcnt;
- unsigned int softirqs_raised;
-#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
-# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
- int kmap_idx;
- pte_t kmap_pte[KM_TYPE_NR];
-# endif
-#endif
};
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int node, int pages, bool migrated);
extern void set_numabalancing_state(bool enabled);
@@ -1463,17 +1428,6 @@ static inline void set_numabalancing_state(bool enabled)
}
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
-#else
-static inline bool cur_pf_disabled(void) { return false; }
-#endif
-
-static inline bool pagefault_disabled(void)
-{
- return in_atomic() || cur_pf_disabled();
-}
-
static inline struct pid *task_pid(struct task_struct *task)
{
return task->pids[PIDTYPE_PID].pid;
@@ -1609,15 +1563,6 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-#ifdef CONFIG_PREEMPT_RT_BASE
-extern void __put_task_struct_cb(struct rcu_head *rhp);
-
-static inline void put_task_struct(struct task_struct *t)
-{
- if (atomic_dec_and_test(&t->usage))
- call_rcu(&t->put_rcu, __put_task_struct_cb);
-}
-#else
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
@@ -1625,7 +1570,6 @@ static inline void put_task_struct(struct task_struct *t)
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
-#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
@@ -1664,7 +1608,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
* Per process flags
*/
-#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
@@ -1810,10 +1753,6 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
-int migrate_me(void);
-void tell_sched_cpu_down_begin(int cpu);
-void tell_sched_cpu_down_done(int cpu);
-
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -1826,9 +1765,6 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return -EINVAL;
return 0;
}
-static inline int migrate_me(void) { return 0; }
-static inline void tell_sched_cpu_down_begin(int cpu) { }
-static inline void tell_sched_cpu_down_done(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ_COMMON
@@ -2036,7 +1972,6 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
-extern int wake_up_lock_sleeper(struct task_struct * tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
@@ -2151,24 +2086,12 @@ extern struct mm_struct * mm_alloc(void);
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
-
static inline void mmdrop(struct mm_struct * mm)
{
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
-#ifdef CONFIG_PREEMPT_RT_BASE
-extern void __mmdrop_delayed(struct rcu_head *rhp);
-static inline void mmdrop_delayed(struct mm_struct *mm)
-{
- if (atomic_dec_and_test(&mm->mm_count))
- call_rcu(&mm->delayed_drop, __mmdrop_delayed);
-}
-#else
-# define mmdrop_delayed(mm) mmdrop(mm)
-#endif
-
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
@@ -2451,52 +2374,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
-#ifdef CONFIG_PREEMPT_LAZY
-static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-}
-
-static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-}
-
-static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
-}
-
-static inline int need_resched_lazy(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-}
-
-static inline int need_resched_now(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-
-static inline int need_resched(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED) ||
- test_thread_flag(TIF_NEED_RESCHED_LAZY);
-}
-#else
-static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
-static inline int need_resched_lazy(void) { return 0; }
-
-static inline int need_resched_now(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-
-static inline int need_resched(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-#endif
-
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
@@ -2528,49 +2405,9 @@ static inline int signal_pending_state(long state, struct task_struct *p)
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
-static inline bool __task_is_stopped_or_traced(struct task_struct *task)
-{
- if (task->state & (__TASK_STOPPED | __TASK_TRACED))
- return true;
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
- return true;
-#endif
- return false;
-}
-
-static inline bool task_is_stopped_or_traced(struct task_struct *task)
-{
- bool traced_stopped;
-
-#ifdef CONFIG_PREEMPT_RT_FULL
- unsigned long flags;
-
- raw_spin_lock_irqsave(&task->pi_lock, flags);
- traced_stopped = __task_is_stopped_or_traced(task);
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-#else
- traced_stopped = __task_is_stopped_or_traced(task);
-#endif
- return traced_stopped;
-}
-
-static inline bool task_is_traced(struct task_struct *task)
+static inline int need_resched(void)
{
- bool traced = false;
-
- if (task->state & __TASK_TRACED)
- return true;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /* in case the task is sleeping on tasklist_lock */
- raw_spin_lock_irq(&task->pi_lock);
- if (task->state & __TASK_TRACED)
- traced = true;
- else if (task->saved_state & __TASK_TRACED)
- traced = true;
- raw_spin_unlock_irq(&task->pi_lock);
-#endif
- return traced;
+ return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}
/*
@@ -2589,7 +2426,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
-#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET 0
@@ -2600,16 +2437,12 @@ extern int __cond_resched_lock(spinlock_t *lock);
__cond_resched_lock(lock); \
})
-#ifndef CONFIG_PREEMPT_RT_FULL
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
-#else
-# define cond_resched_softirq() cond_resched()
-#endif
static inline void cond_resched_rcu(void)
{
@@ -2795,26 +2628,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
-static inline int __migrate_disabled(struct task_struct *p)
-{
-#ifdef CONFIG_PREEMPT_RT_FULL
- return p->migrate_disable;
-#else
- return 0;
-#endif
-}
-
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
-{
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (p->migrate_disable)
- return cpumask_of(task_cpu(p));
-#endif
-
- return &p->cpus_allowed;
-}
-
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
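
tsk_cpus_allowed() reverts from a helper that special-cased migrate-disabled tasks to a plain accessor macro, and need_resched() to a single TIF_NEED_RESCHED test. A sketch of the accessor (p and cpu are hypothetical):

/* fall back to any online CPU the task is allowed on */
if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
	cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_online_mask);
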
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 4d54d6c..440434d 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -35,7 +35,6 @@ static inline int rt_task(struct task_struct *p)
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
@@ -46,10 +45,6 @@ static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
-static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
-{
- return 0;
-}
# define rt_mutex_adjust_pi(p) do { } while (0)
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 019a936..21a2093 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -152,30 +152,18 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
-static inline void __write_seqcount_begin(seqcount_t *s)
+static inline void write_seqcount_begin(seqcount_t *s)
{
s->sequence++;
smp_wmb();
}
-static inline void write_seqcount_begin(seqcount_t *s)
-{
- preempt_disable_rt();
- __write_seqcount_begin(s);
-}
-
-static inline void __write_seqcount_end(seqcount_t *s)
+static inline void write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
}
-static inline void write_seqcount_end(seqcount_t *s)
-{
- __write_seqcount_end(s);
- preempt_enable_rt();
-}
-
/**
* write_seqcount_barrier - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
@@ -216,32 +204,10 @@ typedef struct {
/*
* Read side functions for starting and finalizing a read side section.
*/
-#ifndef CONFIG_PREEMPT_RT_FULL
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
return read_seqcount_begin(&sl->seqcount);
}
-#else
-/*
- * Starvation safe read side for RT
- */
-static inline unsigned read_seqbegin(seqlock_t *sl)
-{
- unsigned ret;
-
-repeat:
- ret = ACCESS_ONCE(sl->seqcount.sequence);
- if (unlikely(ret & 1)) {
- /*
- * Take the lock and let the writer proceed (i.e. evtl
- * boost it), otherwise we could loop here forever.
- */
- spin_unlock_wait(&sl->lock);
- goto repeat;
- }
- return ret;
-}
-#endif
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
@@ -256,36 +222,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock(seqlock_t *sl)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock(&sl->lock);
}
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock_bh(&sl->lock);
}
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock_irq(&sl->lock);
}
@@ -294,7 +260,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
return flags;
}
@@ -304,7 +270,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
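
The restored seqlock above is the classic counter protocol: a writer makes the sequence odd while updating and even when done; a reader retries whenever it saw an odd or changed counter. A self-contained userspace sketch of the same protocol, using C11 seq_cst atomics in place of the kernel's smp_wmb()/smp_rmb() placement (names are mine, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint seq;             /* even = stable, odd = write in progress */
    static atomic_ulong val_a, val_b;   /* the protected pair */

    static void write_pair(unsigned long a, unsigned long b)
    {
            atomic_fetch_add(&seq, 1);  /* counter goes odd */
            atomic_store(&val_a, a);
            atomic_store(&val_b, b);
            atomic_fetch_add(&seq, 1);  /* counter even again */
    }

    static void read_pair(unsigned long *a, unsigned long *b)
    {
            unsigned start;

            do {
                    start = atomic_load(&seq);
                    *a = atomic_load(&val_a);
                    *b = atomic_load(&val_b);
            } while ((start & 1) || start != atomic_load(&seq));
    }

    int main(void)
    {
            unsigned long a, b;

            write_pair(1, 2);
            read_pair(&a, &b);
            printf("a=%lu b=%lu\n", a, b);
            return 0;
    }
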
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 1414eb2..2ac423b 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -226,7 +226,6 @@ static inline void init_sigpending(struct sigpending *sig)
}
extern void flush_sigqueue(struct sigpending *queue);
-extern void flush_task_sigqueue(struct task_struct *tsk);
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3bb9cf3..9995165 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -133,7 +133,6 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
- raw_spinlock_t raw_lock;
};
struct sk_buff;
@@ -1074,12 +1073,6 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
__skb_queue_head_init(list);
}
-static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
-{
- raw_spin_lock_init(&list->raw_lock);
- __skb_queue_head_init(list);
-}
-
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
diff --git a/include/linux/smp.h b/include/linux/smp.h
index e05b694..731f523 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -188,9 +188,6 @@ static inline void __smp_call_function_single(int cpuid,
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
-#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
-#define put_cpu_light() migrate_enable()
-
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index a124f92..75f3494 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -262,11 +262,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
/* Include rwlock functions */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# include <linux/rwlock_rt.h>
-#else
-# include <linux/rwlock.h>
-#endif
+#include <linux/rwlock.h>
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -277,10 +273,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
-# include <linux/spinlock_rt.h>
-#else /* PREEMPT_RT_FULL */
-
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
@@ -410,6 +402,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-#endif /* !PREEMPT_RT_FULL */
-
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 1356078..bdb9993 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -191,8 +191,6 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
return 0;
}
-#ifndef CONFIG_PREEMPT_RT_FULL
-# include <linux/rwlock_api_smp.h>
-#endif
+#include <linux/rwlock_api_smp.h>
#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
deleted file mode 100644
index ac6f08b..0000000
--- a/include/linux/spinlock_rt.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef __LINUX_SPINLOCK_RT_H
-#define __LINUX_SPINLOCK_RT_H
-
-#ifndef __LINUX_SPINLOCK_H
-#error Do not include directly. Use spinlock.h
-#endif
-
-#include <linux/bug.h>
-
-extern void
-__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
-
-#define spin_lock_init(slock) \
-do { \
- static struct lock_class_key __key; \
- \
- rt_mutex_init(&(slock)->lock); \
- __rt_spin_lock_init(slock, #slock, &__key); \
-} while (0)
-
-extern void __lockfunc rt_spin_lock(spinlock_t *lock);
-extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
-extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
-extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
-extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock);
-extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
-extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
-extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
-extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
-extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
-
-/*
- * lockdep-less calls, for derived types like rwlock:
- * (for trylock they can use rt_mutex_trylock() directly.
- */
-extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
-extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
-
-#define spin_lock(lock) \
- do { \
- migrate_disable(); \
- rt_spin_lock(lock); \
- } while (0)
-
-#define spin_lock_bh(lock) \
- do { \
- local_bh_disable(); \
- migrate_disable(); \
- rt_spin_lock(lock); \
- } while (0)
-
-#define spin_lock_irq(lock) spin_lock(lock)
-
-#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
-
-#define spin_trylock(lock) \
-({ \
- int __locked; \
- migrate_disable(); \
- __locked = spin_do_trylock(lock); \
- if (!__locked) \
- migrate_enable(); \
- __locked; \
-})
-
-#ifdef CONFIG_LOCKDEP
-# define spin_lock_nested(lock, subclass) \
- do { \
- migrate_disable(); \
- rt_spin_lock_nested(lock, subclass); \
- } while (0)
-
-# define spin_lock_irqsave_nested(lock, flags, subclass) \
- do { \
- typecheck(unsigned long, flags); \
- flags = 0; \
- migrate_disable(); \
- rt_spin_lock_nested(lock, subclass); \
- } while (0)
-#else
-# define spin_lock_nested(lock, subclass) spin_lock(lock)
-
-# define spin_lock_irqsave_nested(lock, flags, subclass) \
- do { \
- typecheck(unsigned long, flags); \
- flags = 0; \
- spin_lock(lock); \
- } while (0)
-#endif
-
-#define spin_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = 0; \
- spin_lock(lock); \
- } while (0)
-
-static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
-{
- unsigned long flags = 0;
-#ifdef CONFIG_TRACE_IRQFLAGS
- flags = rt_spin_lock_trace_flags(lock);
-#else
- spin_lock(lock); /* lock_local */
-#endif
- return flags;
-}
-
-/* FIXME: we need rt_spin_lock_nest_lock */
-#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
-
-#define spin_unlock(lock) \
- do { \
- rt_spin_unlock(lock); \
- migrate_enable(); \
- } while (0)
-
-#define spin_unlock_bh(lock) \
- do { \
- rt_spin_unlock(lock); \
- migrate_enable(); \
- local_bh_enable(); \
- } while (0)
-
-#define spin_unlock_irq(lock) spin_unlock(lock)
-
-#define spin_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- (void) flags; \
- spin_unlock(lock); \
- } while (0)
-
-#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
-#define spin_trylock_irq(lock) spin_trylock(lock)
-
-#define spin_trylock_irqsave(lock, flags) \
- rt_spin_trylock_irqsave(lock, &(flags))
-
-#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
-
-#ifdef CONFIG_GENERIC_LOCKBREAK
-# define spin_is_contended(lock) ((lock)->break_lock)
-#else
-# define spin_is_contended(lock) (((void)(lock), 0))
-#endif
-
-static inline int spin_can_lock(spinlock_t *lock)
-{
- return !rt_mutex_is_locked(&lock->lock);
-}
-
-static inline int spin_is_locked(spinlock_t *lock)
-{
- return rt_mutex_is_locked(&lock->lock);
-}
-
-static inline void assert_spin_locked(spinlock_t *lock)
-{
- BUG_ON(!spin_is_locked(lock));
-}
-
-#define atomic_dec_and_lock(atomic, lock) \
- atomic_dec_and_spin_lock(atomic, lock)
-
-#endif
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 10bac71..73548eb 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,15 +9,80 @@
* Released under the General Public License (GPL).
*/
-#include <linux/spinlock_types_raw.h>
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep.h>
+
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
-#ifndef CONFIG_PREEMPT_RT_FULL
-# include <linux/spinlock_types_nort.h>
-# include <linux/rwlock_types.h>
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
#else
-# include <linux/rtmutex.h>
-# include <linux/spinlock_types_rt.h>
-# include <linux/rwlock_types_rt.h>
+# define SPIN_DEBUG_INIT(lockname)
#endif
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
+#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
+
#endif /* __LINUX_SPINLOCK_TYPES_H */
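
With the raw and non-raw lock types folded back into spinlock_types.h, the static initializers behave as before. A kernel-style usage sketch (my_lock and my_count are hypothetical):

    static DEFINE_SPINLOCK(my_lock);    /* expands via __SPIN_LOCK_UNLOCKED */
    static unsigned long my_count;

    static void bump_count(void)
    {
            spin_lock(&my_lock);
            my_count++;
            spin_unlock(&my_lock);
    }
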
diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
deleted file mode 100644
index f1dac1f..0000000
--- a/include/linux/spinlock_types_nort.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
-#define __LINUX_SPINLOCK_TYPES_NORT_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-#error "Do not include directly. Include spinlock_types.h instead"
-#endif
-
-/*
- * The non RT version maps spinlocks to raw_spinlocks
- */
-typedef struct spinlock {
- union {
- struct raw_spinlock rlock;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
- struct {
- u8 __padding[LOCK_PADSIZE];
- struct lockdep_map dep_map;
- };
-#endif
- };
-} spinlock_t;
-
-#define __SPIN_LOCK_INITIALIZER(lockname) \
- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
-
-#define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-
-#endif
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
deleted file mode 100644
index edffc4d..0000000
--- a/include/linux/spinlock_types_raw.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
-#define __LINUX_SPINLOCK_TYPES_RAW_H
-
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep.h>
-
-typedef struct raw_spinlock {
- arch_spinlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#define SPINLOCK_OWNER_INIT ((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname) \
- .magic = SPINLOCK_MAGIC, \
- .owner_cpu = -1, \
- .owner = SPINLOCK_OWNER_INIT,
-#else
-# define SPIN_DEBUG_INIT(lockname)
-#endif
-
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- { \
- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- SPIN_DEBUG_INIT(lockname) \
- SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
-
-#endif
diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
deleted file mode 100644
index 9fd4319..0000000
--- a/include/linux/spinlock_types_rt.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __LINUX_SPINLOCK_TYPES_RT_H
-#define __LINUX_SPINLOCK_TYPES_RT_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-#error "Do not include directly. Include spinlock_types.h instead"
-#endif
-
-#include <linux/cache.h>
-
-/*
- * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
- */
-typedef struct spinlock {
- struct rt_mutex lock;
- unsigned int break_lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} spinlock_t;
-
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# define __RT_SPIN_INITIALIZER(name) \
- { \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- .save_state = 1, \
- .file = __FILE__, \
- .line = __LINE__ , \
- }
-#else
-# define __RT_SPIN_INITIALIZER(name) \
- { \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- .save_state = 1, \
- }
-#endif
-
-/*
-.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
-*/
-
-#define __SPIN_LOCK_UNLOCKED(name) \
- { .lock = __RT_SPIN_INITIALIZER(name.lock), \
- SPIN_DEP_MAP_INIT(name) }
-
-#define __DEFINE_SPINLOCK(name) \
- spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
-
-#define DEFINE_SPINLOCK(name) \
- spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
-
-#endif
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index d5e50dd..c114614 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct *sp);
void process_srcu(struct work_struct *work);
-#define __SRCU_STRUCT_INIT(name, pcpu_name) \
+#define __SRCU_STRUCT_INIT(name) \
{ \
.completed = -300, \
- .per_cpu_ref = &pcpu_name, \
+ .per_cpu_ref = &name##_srcu_array, \
.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
.running = false, \
.batch_queue = RCU_BATCH_INIT(name.batch_queue), \
@@ -104,12 +104,11 @@ void process_srcu(struct work_struct *work);
*/
#define DEFINE_SRCU(name) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array);
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name);
#define DEFINE_STATIC_SRCU(name) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- static struct srcu_struct name = __SRCU_STRUCT_INIT(\
- name, name##_srcu_array);
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
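
The srcu.h simplification works because __SRCU_STRUCT_INIT() can now derive the per-CPU array name itself via token pasting, so callers no longer pass it separately. Approximately, DEFINE_SRCU(my_srcu) expands to:

    static DEFINE_PER_CPU(struct srcu_struct_array, my_srcu_srcu_array);
    struct srcu_struct my_srcu = {
            .completed   = -300,
            .per_cpu_ref = &my_srcu_srcu_array,  /* from name##_srcu_array */
            /* ...remaining fields as in __SRCU_STRUCT_INIT()... */
    };
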
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index b15655f..14a8ff2 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -25,7 +25,6 @@
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
-#include <linux/atomic.h>
#include <uapi/linux/sysctl.h>
/* For the /proc/sys support */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 5fcd72c..8c5a197 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_SMP
extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t) del_timer(t)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 64f8646..96c2324 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -203,7 +203,7 @@ struct tty_port {
wait_queue_head_t delta_msr_wait; /* Modem status change */
unsigned long flags; /* TTY flags ASY_*/
unsigned char console:1, /* port is a console */
- low_latency:1; /* direct buffer flush */
+ low_latency:1; /* optional: tune for latency */
struct mutex mutex; /* Locking */
struct mutex buf_mutex; /* Buffer alloc lock */
unsigned char *xmit_buf; /* Optional buffer */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 44b3751..5ca0951 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,37 +6,38 @@
/*
* These routines enable/disable the pagefault handler in that
- * it will not take any MM locks and go straight to the fixup table.
+ * it will not take any locks and go straight to the fixup table.
+ *
+ * They have great resemblance to the preempt_disable/enable calls
+ * and in fact they are identical; this is because currently there is
+ * no other way to make the pagefault handlers do this. So we do
+ * disable preemption but we don't necessarily care about that.
*/
-static inline void raw_pagefault_disable(void)
+static inline void pagefault_disable(void)
{
inc_preempt_count();
+ /*
+ * make sure to have issued the store before a pagefault
+ * can hit.
+ */
barrier();
}
-static inline void raw_pagefault_enable(void)
+static inline void pagefault_enable(void)
{
+ /*
+ * make sure to issue those last loads/stores before enabling
+ * the pagefault handler again.
+ */
barrier();
dec_preempt_count();
+ /*
+ * make sure we do..
+ */
barrier();
preempt_check_resched();
}
-#ifndef CONFIG_PREEMPT_RT_FULL
-static inline void pagefault_disable(void)
-{
- raw_pagefault_disable();
-}
-
-static inline void pagefault_enable(void)
-{
- raw_pagefault_enable();
-}
-#else
-extern void pagefault_disable(void);
-extern void pagefault_enable(void);
-#endif
-
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -76,9 +77,9 @@ static inline unsigned long __copy_from_user_nocache(void *to,
mm_segment_t old_fs = get_fs(); \
\
set_fs(KERNEL_DS); \
- raw_pagefault_disable(); \
+ pagefault_disable(); \
ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
- raw_pagefault_enable(); \
+ pagefault_enable(); \
set_fs(old_fs); \
ret; \
})
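
For readers of the uaccess.h hunks: pagefault_disable() is once again the only API, implemented as a preempt-count bump so a faulting access takes the exception fixup path instead of sleeping in the fault handler. A kernel-style sketch of the usual pattern — the function name and parameters are hypothetical:

    /* Copy one word from user space without ever sleeping. */
    static unsigned long get_user_word_atomic(long *dst,
                                              const long __user *uaddr)
    {
            unsigned long left;

            pagefault_disable();
            left = __copy_from_user_inatomic(dst, uaddr, sizeof(*dst));
            pagefault_enable();

            return left;    /* 0 on success, else bytes not copied */
    }
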
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index d115f62..06f28be 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -26,7 +26,6 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
-#include <linux/wait.h>
struct vm_area_struct;
struct mm_struct;
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index e303eef..0662e98 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -30,7 +30,7 @@ struct usbnet {
struct driver_info *driver_info;
const char *driver_name;
void *driver_priv;
- wait_queue_head_t *wait;
+ wait_queue_head_t wait;
struct mutex phy_mutex;
unsigned char suspend_count;
unsigned char pkt_cnt, pkt_err;
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 1ea2fd5..a67b384 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -29,9 +29,7 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
- preempt_disable_rt();
__this_cpu_inc(vm_event_states.event[item]);
- preempt_enable_rt();
}
static inline void count_vm_event(enum vm_event_item item)
@@ -41,9 +39,7 @@ static inline void count_vm_event(enum vm_event_item item)
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- preempt_disable_rt();
__this_cpu_add(vm_event_states.event[item], delta);
- preempt_enable_rt();
}
static inline void count_vm_events(enum vm_event_item item, long delta)
diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h
deleted file mode 100644
index f86bca2..0000000
--- a/include/linux/wait-simple.h
+++ /dev/null
@@ -1,207 +0,0 @@
-#ifndef _LINUX_WAIT_SIMPLE_H
-#define _LINUX_WAIT_SIMPLE_H
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-#include <asm/current.h>
-
-struct swaiter {
- struct task_struct *task;
- struct list_head node;
-};
-
-#define DEFINE_SWAITER(name) \
- struct swaiter name = { \
- .task = current, \
- .node = LIST_HEAD_INIT((name).node), \
- }
-
-struct swait_head {
- raw_spinlock_t lock;
- struct list_head list;
-};
-
-#define SWAIT_HEAD_INITIALIZER(name) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- .list = LIST_HEAD_INIT((name).list), \
- }
-
-#define DEFINE_SWAIT_HEAD(name) \
- struct swait_head name = SWAIT_HEAD_INITIALIZER(name)
-
-extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
-
-#define init_swait_head(swh) \
- do { \
- static struct lock_class_key __key; \
- \
- __init_swait_head((swh), &__key); \
- } while (0)
-
-/*
- * Waiter functions
- */
-extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w);
-extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
-extern void swait_finish_locked(struct swait_head *head, struct swaiter *w);
-extern void swait_finish(struct swait_head *head, struct swaiter *w);
-
-/* Check whether a head has waiters enqueued */
-static inline bool swaitqueue_active(struct swait_head *h)
-{
- /* Make sure the condition is visible before checking list_empty() */
- smp_mb();
- return !list_empty(&h->list);
-}
-
-/*
- * Wakeup functions
- */
-extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num);
-extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num);
-
-#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1)
-#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1)
-#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0)
-#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0)
-
-/*
- * Event API
- */
-#define __swait_event(wq, condition) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- schedule(); \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
-#define swait_event(wq, condition) \
-do { \
- if (condition) \
- break; \
- __swait_event(wq, condition); \
-} while (0)
-
-#define __swait_event_interruptible(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- schedule(); \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-#define __swait_event_interruptible_timeout(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event_interruptible - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
-#define swait_event_interruptible(wq, condition) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __swait_event_interruptible(wq, condition, __ret); \
- __ret; \
-})
-
-#define swait_event_interruptible_timeout(wq, condition, timeout) \
-({ \
- int __ret = timeout; \
- if (!(condition)) \
- __swait_event_interruptible_timeout(wq, condition, __ret); \
- __ret; \
-})
-
-#define __swait_event_timeout(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event_timeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @timeout: timeout, in jiffies
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function returns 0 if the @timeout elapsed, and the remaining
- * jiffies if the condition evaluated to true before the timeout elapsed.
- */
-#define swait_event_timeout(wq, condition, timeout) \
-({ \
- long __ret = timeout; \
- if (!(condition)) \
- __swait_event_timeout(wq, condition, __ret); \
- __ret; \
-})
-
-#endif
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 68f7245..a67fc16 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -7,7 +7,6 @@
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>
-#include <linux/atomic.h>
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
diff --git a/include/net/dst.h b/include/net/dst.h
index a158a07..3c4c944 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -395,7 +395,7 @@ static inline void dst_confirm(struct dst_entry *dst)
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
struct sk_buff *skb)
{
- struct hh_cache *hh;
+ const struct hh_cache *hh;
if (dst->pending_confirm) {
unsigned long now = jiffies;
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index a9d84bf..536501a 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -333,7 +333,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
}
#endif
-static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
+static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
unsigned int seq;
int hh_len;
@@ -388,7 +388,7 @@ struct neighbour_cb {
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
-static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
+static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
const struct net_device *dev)
{
unsigned int seq;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 5e9ce7f..bf2ec22 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -57,7 +57,6 @@ struct netns_ipv4 {
int sysctl_icmp_echo_ignore_all;
int sysctl_icmp_echo_ignore_broadcasts;
- int sysctl_icmp_echo_sysrq;
int sysctl_icmp_ignore_bogus_error_responses;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
diff --git a/include/net/sock.h b/include/net/sock.h
index 808cbc2..6e2c490 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1459,6 +1459,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
*/
#define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
+static inline void sock_release_ownership(struct sock *sk)
+{
+ sk->sk_lock.owned = 0;
+}
+
/*
* Macro so as to not evaluate some arguments when
* lockdep is not enabled.
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 51dcc6f..31c4890 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -484,20 +484,21 @@ extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
#ifdef CONFIG_SYN_COOKIES
#include <linux/ktime.h>
-/* Syncookies use a monotonic timer which increments every 64 seconds.
+/* Syncookies use a monotonic timer which increments every 60 seconds.
* This counter is used both as a hash input and partially encoded into
* the cookie value. A cookie is only validated further if the delta
* between the current counter value and the encoded one is less than this,
- * i.e. a sent cookie is valid only at most for 128 seconds (or less if
+ * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
* the counter advances immediately after a cookie is generated).
*/
#define MAX_SYNCOOKIE_AGE 2
static inline u32 tcp_cookie_time(void)
{
- struct timespec now;
- getnstimeofday(&now);
- return now.tv_sec >> 6; /* 64 seconds granularity */
+ u64 val = get_jiffies_64();
+
+ do_div(val, 60 * HZ);
+ return val;
}
extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
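
The new tcp_cookie_time() divides 64-bit jiffies by 60*HZ, so the counter ticks once per minute; with MAX_SYNCOOKIE_AGE == 2 a cookie survives at most two ticks, i.e. up to roughly 120 seconds and as little as just over 60 if a tick lands right after the cookie is minted. A self-contained model of that arithmetic (the HZ value is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define HZ 1000u

    static uint32_t cookie_time(uint64_t jiffies64)
    {
            return (uint32_t)(jiffies64 / (60u * HZ));
    }

    int main(void)
    {
            printf("%u\n", cookie_time(59u * HZ));  /* 0: still first minute */
            printf("%u\n", cookie_time(61u * HZ));  /* 1: second minute */
            return 0;
    }
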
diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h
deleted file mode 100644
index 6122e42..0000000
--- a/include/trace/events/hist.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hist
-
-#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_HIST_H
-
-#include "latency_hist.h"
-#include <linux/tracepoint.h>
-
-#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
-#define trace_preemptirqsoff_hist(a, b)
-#else
-TRACE_EVENT(preemptirqsoff_hist,
-
- TP_PROTO(int reason, int starthist),
-
- TP_ARGS(reason, starthist),
-
- TP_STRUCT__entry(
- __field(int, reason)
- __field(int, starthist)
- ),
-
- TP_fast_assign(
- __entry->reason = reason;
- __entry->starthist = starthist;
- ),
-
- TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
- __entry->starthist ? "start" : "stop")
-);
-#endif
-
-#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
-#define trace_hrtimer_interrupt(a, b, c, d)
-#else
-TRACE_EVENT(hrtimer_interrupt,
-
- TP_PROTO(int cpu, long long offset, struct task_struct *curr,
- struct task_struct *task),
-
- TP_ARGS(cpu, offset, curr, task),
-
- TP_STRUCT__entry(
- __field(int, cpu)
- __field(long long, offset)
- __array(char, ccomm, TASK_COMM_LEN)
- __field(int, cprio)
- __array(char, tcomm, TASK_COMM_LEN)
- __field(int, tprio)
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->offset = offset;
- memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
- __entry->cprio = curr->prio;
- memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>",
- task != NULL ? TASK_COMM_LEN : 7);
- __entry->tprio = task != NULL ? task->prio : -1;
- ),
-
- TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
- __entry->cpu, __entry->offset, __entry->ccomm,
- __entry->cprio, __entry->tcomm, __entry->tprio)
-);
-#endif
-
-#endif /* _TRACE_HIST_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/latency_hist.h b/include/trace/events/latency_hist.h
deleted file mode 100644
index d3f2fbd..0000000
--- a/include/trace/events/latency_hist.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _LATENCY_HIST_H
-#define _LATENCY_HIST_H
-
-enum hist_action {
- IRQS_ON,
- PREEMPT_ON,
- TRACE_STOP,
- IRQS_OFF,
- PREEMPT_OFF,
- TRACE_START,
-};
-
-static char *actions[] = {
- "IRQS_ON",
- "PREEMPT_ON",
- "TRACE_STOP",
- "IRQS_OFF",
- "PREEMPT_OFF",
- "TRACE_START",
-};
-
-static inline char *getaction(int action)
-{
- if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0]))
- return actions[action];
- return "unknown";
-}
-
-#endif /* _LATENCY_HIST_H */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 712ea36..645d749 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -303,15 +303,12 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
#undef __array
#define __array(type, item, len) \
do { \
- mutex_lock(&event_storage_mutex); \
+ char *type_str = #type"["__stringify(len)"]"; \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
- snprintf(event_storage, sizeof(event_storage), \
- "%s[%d]", #type, len); \
- ret = trace_define_field(event_call, event_storage, #item, \
+ ret = trace_define_field(event_call, type_str, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
is_signed_type(type), FILTER_OTHER); \
- mutex_unlock(&event_storage_mutex); \
if (ret) \
return ret; \
} while (0);
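
The __array() rework above can drop event_storage and its mutex because the type string is built at compile time: #type stringizes the macro argument and adjacent string literals concatenate, so no shared runtime buffer is needed. A self-contained demonstration of the same paste:

    #include <stdio.h>

    #define __stringify_1(x) #x
    #define __stringify(x)   __stringify_1(x)
    #define TYPE_STR(type, len) #type "[" __stringify(len) "]"

    int main(void)
    {
            puts(TYPE_STR(char, 16));   /* prints: char[16] */
            return 0;
    }
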
diff --git a/include/uapi/linux/fd.h b/include/uapi/linux/fd.h
index f1f3dd5..84c517c 100644
--- a/include/uapi/linux/fd.h
+++ b/include/uapi/linux/fd.h
@@ -185,7 +185,8 @@ enum {
* to clear media change status */
FD_UNUSED_BIT,
FD_DISK_CHANGED_BIT, /* disk has been changed since last i/o */
- FD_DISK_WRITABLE_BIT /* disk is writable */
+ FD_DISK_WRITABLE_BIT, /* disk is writable */
+ FD_OPEN_SHOULD_FAIL_BIT
};
#define FDSETDRVPRM _IOW(2, 0x90, struct floppy_drive_params)
diff --git a/init/Kconfig b/init/Kconfig
index f6aeea6..d42dc7c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -615,7 +615,7 @@ config RCU_FANOUT_EXACT
config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
- depends on NO_HZ_COMMON && SMP && !PREEMPT_RT_FULL
+ depends on NO_HZ_COMMON && SMP
default n
help
This option permits CPUs to enter dynticks-idle state even if
@@ -642,7 +642,7 @@ config TREE_RCU_TRACE
config RCU_BOOST
bool "Enable RCU priority boosting"
depends on RT_MUTEXES && PREEMPT_RCU
- default y if PREEMPT_RT_FULL
+ default n
help
This option boosts the priority of preempted RCU readers that
block the current preemptible RCU grace period for too long.
@@ -1042,7 +1042,6 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
- depends on !PREEMPT_RT_FULL
default n
help
This feature lets you explicitly allocate real CPU bandwidth
@@ -1407,6 +1406,13 @@ config FUTEX
support for "fast userspace mutexes". The resulting kernel may not
run glibc-based applications correctly.
+config HAVE_FUTEX_CMPXCHG
+ bool
+ help
+ Architectures should select this if futex_atomic_cmpxchg_inatomic()
+ is implemented and always working. This removes a couple of runtime
+ checks.
+
config EPOLL
bool "Enable eventpoll support" if EXPERT
default y
@@ -1576,7 +1582,6 @@ choice
config SLAB
bool "SLAB"
- depends on !PREEMPT_RT_FULL
help
The regular slab allocator that is established and known to work
well in all environments. It organizes cache hot objects in
@@ -1595,7 +1600,6 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
- depends on !PREEMPT_RT_FULL
help
SLOB replaces the stock allocator with a drastically simpler
allocator. SLOB is generally more space efficient but
diff --git a/init/Makefile b/init/Makefile
index 88cf473..7bc47ee 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -33,4 +33,4 @@ silent_chk_compile.h = :
include/generated/compile.h: FORCE
@$($(quiet)chk_compile.h)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
diff --git a/init/main.c b/init/main.c
index e8087f7..63d3e8f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -6,7 +6,7 @@
* GK 2/5/95 - Changed to support mounting root fs via NFS
* Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
* Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
- * Simplified starting of init: Michael A. Griffith <grif@acm.org>
+ * Simplified starting of init: Michael A. Griffith <grif@acm.org>
*/
#define DEBUG /* Enable initcall_debug */
@@ -74,7 +74,6 @@
#include <linux/ptrace.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
-#include <linux/posix-timers.h>
#include <linux/sched_clock.h>
#include <linux/context_tracking.h>
#include <linux/random.h>
@@ -508,7 +507,6 @@ asmlinkage void __init start_kernel(void)
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
- softirq_early_init();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
build_all_zonelists(NULL, NULL);
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 2276120..bb0248f 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -923,17 +923,12 @@ static inline void pipelined_send(struct mqueue_inode_info *info,
struct msg_msg *message,
struct ext_wait_queue *receiver)
{
- /*
- * Keep them in one critical section for PREEMPT_RT:
- */
- preempt_disable_rt();
receiver->msg = message;
list_del(&receiver->list);
receiver->state = STATE_PENDING;
wake_up_process(receiver->task);
smp_wmb();
receiver->state = STATE_READY;
- preempt_enable_rt();
}
/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
@@ -947,18 +942,13 @@ static inline void pipelined_receive(struct mqueue_inode_info *info)
wake_up_interruptible(&info->wait_q);
return;
}
- /*
- * Keep them in one critical section for PREEMPT_RT:
- */
- preempt_disable_rt();
- if (!msg_insert(sender->msg, info)) {
- list_del(&sender->list);
- sender->state = STATE_PENDING;
- wake_up_process(sender->task);
- smp_wmb();
- sender->state = STATE_READY;
- }
- preempt_enable_rt();
+ if (msg_insert(sender->msg, info))
+ return;
+ list_del(&sender->list);
+ sender->state = STATE_PENDING;
+ wake_up_process(sender->task);
+ smp_wmb();
+ sender->state = STATE_READY;
}
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
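
Both pipelined paths above keep the same publication order: store ->msg, wake the task, smp_wmb(), then set STATE_READY last. A sketch of the waiter side this pairs with — field names abridged from ipc/mqueue.c, so treat it as pseudocode rather than the exact loop:

    /* Waiter: spin/sleep until the sender publishes STATE_READY.
     * The smp_wmb() in pipelined_send() orders the ->msg store
     * before the STATE_READY store, so once the state reads as
     * READY it is safe to read ->msg. */
    while (ewp->state != STATE_READY) {
            /* schedule()/timeout handling elided */
    }
    msg = ewp->msg;
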
diff --git a/ipc/msg.c b/ipc/msg.c
index 1cf8b2c..52770bf 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -253,18 +253,10 @@ static void expunge_all(struct msg_queue *msq, int res)
struct msg_receiver *msr, *t;
list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
- /*
- * Make sure that the wakeup doesnt preempt
- * this CPU prematurely. (on PREEMPT_RT)
- */
- preempt_disable_rt();
-
msr->r_msg = NULL;
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = ERR_PTR(res);
-
- preempt_enable_rt();
}
}
@@ -644,12 +636,6 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
!security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
msr->r_msgtype, msr->r_mode)) {
- /*
- * Make sure that the wakeup doesnt preempt
- * this CPU prematurely. (on PREEMPT_RT)
- */
- preempt_disable_rt();
-
list_del(&msr->r_list);
if (msr->r_maxsize < msg->m_ts) {
msr->r_msg = NULL;
@@ -663,11 +649,9 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = msg;
- preempt_enable_rt();
return 1;
}
- preempt_enable_rt();
}
}
return 0;
diff --git a/ipc/sem.c b/ipc/sem.c
index 258b45e..db9d241 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -666,13 +666,6 @@ undo:
static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
{
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct task_struct *p = q->sleeper;
- get_task_struct(p);
- q->status = error;
- wake_up_process(p);
- put_task_struct(p);
-#else
if (list_empty(pt)) {
/*
* Hold preempt off so that we don't get preempted and have the
@@ -684,7 +677,6 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
q->pid = error;
list_add_tail(&q->list, pt);
-#endif
}
/**
@@ -698,7 +690,6 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
*/
static void wake_up_sem_queue_do(struct list_head *pt)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
struct sem_queue *q, *t;
int did_something;
@@ -711,7 +702,6 @@ static void wake_up_sem_queue_do(struct list_head *pt)
}
if (did_something)
preempt_enable();
-#endif
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 8bb92eb..d2b32ac 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -222,4 +222,4 @@ endif
config MUTEX_SPIN_ON_OWNER
def_bool y
- depends on SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL
+ depends on SMP && !DEBUG_MUTEXES
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 11dbe26..3f9c974 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -1,16 +1,3 @@
-config PREEMPT
- bool
- select PREEMPT_COUNT
-
-config PREEMPT_RT_BASE
- bool
- select PREEMPT
-
-config HAVE_PREEMPT_LAZY
- bool
-
-config PREEMPT_LAZY
- def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
choice
prompt "Preemption Model"
@@ -46,9 +33,9 @@ config PREEMPT_VOLUNTARY
Select this if you are building a kernel for a desktop system.
-config PREEMPT__LL
+config PREEMPT
bool "Preemptible Kernel (Low-Latency Desktop)"
- select PREEMPT
+ select PREEMPT_COUNT
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
help
This option reduces the latency of the kernel by making
@@ -65,22 +52,6 @@ config PREEMPT__LL
embedded system with latency requirements in the milliseconds
range.
-config PREEMPT_RTB
- bool "Preemptible Kernel (Basic RT)"
- select PREEMPT_RT_BASE
- help
- This option is basically the same as (Low-Latency Desktop) but
- enables changes which are preliminary for the full preemptible
- RT kernel.
-
-config PREEMPT_RT_FULL
- bool "Fully Preemptible Kernel (RT)"
- depends on IRQ_FORCED_THREADING
- select PREEMPT_RT_BASE
- select PREEMPT_RCU
- help
- All and everything
-
endchoice
config PREEMPT_COUNT
diff --git a/kernel/Makefile b/kernel/Makefile
index b3ff0a8..1ce4755 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,10 +7,10 @@ obj-y = fork.o exec_domain.o panic.o \
sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
rcupdate.o extable.o params.o posix-timers.o \
- kthread.o wait.o sys_ni.o posix-cpu-timers.o \
- hrtimer.o nsproxy.o srcu.o semaphore.o \
+ kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \
+ hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o cred.o reboot.o \
- async.o range.o groups.o lglock.o smpboot.o wait-simple.o
+ async.o range.o groups.o lglock.o smpboot.o
ifdef CONFIG_FUNCTION_TRACER
# Do not trace debug files and internal ftrace files
@@ -33,11 +33,7 @@ obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
-ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
-obj-y += mutex.o
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
-obj-y += rwsem.o
-endif
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
@@ -49,7 +45,6 @@ endif
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
-obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += smp.o
ifneq ($(CONFIG_SMP),y)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c4f8bc79..1c204fd 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4410,16 +4410,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
rcu_assign_pointer(cgrp->name, name);
/*
- * Temporarily set the pointer to NULL, so idr_find() won't return
- * a half-baked cgroup.
- */
- cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
- if (cgrp->id < 0) {
- err = -ENOMEM;
- goto err_free_name;
- }
-
- /*
* Only live parents can have children. Note that the liveliness
* check isn't strictly necessary because cgroup_mkdir() and
* cgroup_rmdir() are fully synchronized by i_mutex; however, do it
@@ -4428,7 +4418,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
*/
if (!cgroup_lock_live_group(parent)) {
err = -ENODEV;
- goto err_free_id;
+ goto err_free_name;
}
/* Grab a reference on the superblock so the hierarchy doesn't
@@ -4438,6 +4428,16 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
* fs */
atomic_inc(&sb->s_active);
+ /*
+ * Temporarily set the pointer to NULL, so idr_find() won't return
+ * a half-baked cgroup.
+ */
+ cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+ if (cgrp->id < 0) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+
init_cgroup_housekeeping(cgrp);
dentry->d_fsdata = cgrp;
@@ -4544,11 +4544,11 @@ err_free_all:
ss->css_free(css);
}
}
+ idr_remove(&root->cgroup_idr, cgrp->id);
+err_unlock:
mutex_unlock(&cgroup_mutex);
/* Release the reference count that we took on the superblock */
deactivate_super(sb);
-err_free_id:
- idr_remove(&root->cgroup_idr, cgrp->id);
err_free_name:
kfree(rcu_dereference_raw(cgrp->name));
err_free_cgrp:
diff --git a/kernel/cpu.c b/kernel/cpu.c
index ba7416b..d7f07a2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -63,290 +63,6 @@ static struct {
.refcount = 0,
};
-/**
- * hotplug_pcp - per cpu hotplug descriptor
- * @unplug: set when pin_current_cpu() needs to sync tasks
- * @sync_tsk: the task that waits for tasks to finish pinned sections
- * @refcount: counter of tasks in pinned sections
- * @grab_lock: set when the tasks entering pinned sections should wait
- * @synced: notifier for @sync_tsk to tell cpu_down it's finished
- * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
- * @mutex_init: zero if the mutex hasn't been initialized yet.
- *
- * Although @unplug and @sync_tsk may point to the same task, the @unplug
- * is used as a flag and still exists after @sync_tsk has exited and
- * @sync_tsk set to NULL.
- */
-struct hotplug_pcp {
- struct task_struct *unplug;
- struct task_struct *sync_tsk;
- int refcount;
- int grab_lock;
- struct completion synced;
- struct completion unplug_wait;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /*
- * Note, on PREEMPT_RT, the hotplug lock must save the state of
- * the task, otherwise the mutex will cause the task to fail
- * to sleep when required. (Because it's called from migrate_disable())
- *
- * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
- * state.
- */
- spinlock_t lock;
-#else
- struct mutex mutex;
-#endif
- int mutex_init;
-};
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
-# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
-#else
-# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
-# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
-#endif
-
-static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
-
-/**
- * pin_current_cpu - Prevent the current cpu from being unplugged
- *
- * Lightweight version of get_online_cpus() to prevent cpu from being
- * unplugged when code runs in a migration disabled region.
- *
- * Must be called with preemption disabled (preempt_count = 1)!
- */
-void pin_current_cpu(void)
-{
- struct hotplug_pcp *hp;
- int force = 0;
-
-retry:
- hp = &__get_cpu_var(hotplug_pcp);
-
- if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
- hp->unplug == current) {
- hp->refcount++;
- return;
- }
- if (hp->grab_lock) {
- preempt_enable();
- hotplug_lock(hp);
- hotplug_unlock(hp);
- } else {
- preempt_enable();
- /*
- * Try to push this task off of this CPU.
- */
- if (!migrate_me()) {
- preempt_disable();
- hp = &__get_cpu_var(hotplug_pcp);
- if (!hp->grab_lock) {
- /*
- * Just let it continue it's already pinned
- * or about to sleep.
- */
- force = 1;
- goto retry;
- }
- preempt_enable();
- }
- }
- preempt_disable();
- goto retry;
-}
-
-/**
- * unpin_current_cpu - Allow unplug of current cpu
- *
- * Must be called with preemption or interrupts disabled!
- */
-void unpin_current_cpu(void)
-{
- struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
-
- WARN_ON(hp->refcount <= 0);
-
- /* This is safe. sync_unplug_thread is pinned to this cpu */
- if (!--hp->refcount && hp->unplug && hp->unplug != current)
- wake_up_process(hp->unplug);
-}
-
-static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
-{
- set_current_state(TASK_UNINTERRUPTIBLE);
- while (hp->refcount) {
- schedule_preempt_disabled();
- set_current_state(TASK_UNINTERRUPTIBLE);
- }
-}
-
-static int sync_unplug_thread(void *data)
-{
- struct hotplug_pcp *hp = data;
-
- wait_for_completion(&hp->unplug_wait);
- preempt_disable();
- hp->unplug = current;
- wait_for_pinned_cpus(hp);
-
- /*
- * This thread will synchronize the cpu_down() with threads
- * that have pinned the CPU. When the pinned CPU count reaches
- * zero, we inform the cpu_down code to continue to the next step.
- */
- set_current_state(TASK_UNINTERRUPTIBLE);
- preempt_enable();
- complete(&hp->synced);
-
- /*
- * If all succeeds, the next step will need tasks to wait till
- * the CPU is offline before continuing. To do this, the grab_lock
- * is set and tasks going into pin_current_cpu() will block on the
- * mutex. But we still need to wait for those that are already in
- * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
- * will kick this thread out.
- */
- while (!hp->grab_lock && !kthread_should_stop()) {
- schedule();
- set_current_state(TASK_UNINTERRUPTIBLE);
- }
-
- /* Make sure grab_lock is seen before we see a stale completion */
- smp_mb();
-
- /*
- * Now just before cpu_down() enters stop machine, we need to make
- * sure all tasks that are in pinned CPU sections are out, and new
- * tasks will now grab the lock, keeping them from entering pinned
- * CPU sections.
- */
- if (!kthread_should_stop()) {
- preempt_disable();
- wait_for_pinned_cpus(hp);
- preempt_enable();
- complete(&hp->synced);
- }
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- while (!kthread_should_stop()) {
- schedule();
- set_current_state(TASK_UNINTERRUPTIBLE);
- }
- set_current_state(TASK_RUNNING);
-
- /*
- * Force this thread off this CPU as it's going down and
- * we don't want any more work on this CPU.
- */
- current->flags &= ~PF_NO_SETAFFINITY;
- do_set_cpus_allowed(current, cpu_present_mask);
- migrate_me();
- return 0;
-}
-
-static void __cpu_unplug_sync(struct hotplug_pcp *hp)
-{
- wake_up_process(hp->sync_tsk);
- wait_for_completion(&hp->synced);
-}
-
-static void __cpu_unplug_wait(unsigned int cpu)
-{
- struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-
- complete(&hp->unplug_wait);
- wait_for_completion(&hp->synced);
-}
-
-/*
- * Start the sync_unplug_thread on the target cpu and wait for it to
- * complete.
- */
-static int cpu_unplug_begin(unsigned int cpu)
-{
- struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
- int err;
-
- /* Protected by cpu_hotplug.lock */
- if (!hp->mutex_init) {
-#ifdef CONFIG_PREEMPT_RT_FULL
- spin_lock_init(&hp->lock);
-#else
- mutex_init(&hp->mutex);
-#endif
- hp->mutex_init = 1;
- }
-
- /* Inform the scheduler to migrate tasks off this CPU */
- tell_sched_cpu_down_begin(cpu);
-
- init_completion(&hp->synced);
- init_completion(&hp->unplug_wait);
-
- hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
- if (IS_ERR(hp->sync_tsk)) {
- err = PTR_ERR(hp->sync_tsk);
- hp->sync_tsk = NULL;
- return err;
- }
- kthread_bind(hp->sync_tsk, cpu);
-
- /*
- * Wait for tasks to get out of the pinned sections,
- * it's still OK if new tasks enter. Some CPU notifiers will
- * wait for tasks that are going to enter these sections and
- * we must not have them block.
- */
- wake_up_process(hp->sync_tsk);
- return 0;
-}
-
-static void cpu_unplug_sync(unsigned int cpu)
-{
- struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-
- init_completion(&hp->synced);
- /* The completion needs to be initialzied before setting grab_lock */
- smp_wmb();
-
- /* Grab the mutex before setting grab_lock */
- hotplug_lock(hp);
- hp->grab_lock = 1;
-
- /*
- * The CPU notifiers have been completed.
- * Wait for tasks to get out of pinned CPU sections and have new
- * tasks block until the CPU is completely down.
- */
- __cpu_unplug_sync(hp);
-
- /* All done with the sync thread */
- kthread_stop(hp->sync_tsk);
- hp->sync_tsk = NULL;
-}
-
-static void cpu_unplug_done(unsigned int cpu)
-{
- struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-
- hp->unplug = NULL;
- /* Let all tasks know cpu unplug is finished before cleaning up */
- smp_wmb();
-
- if (hp->sync_tsk)
- kthread_stop(hp->sync_tsk);
-
- if (hp->grab_lock) {
- hotplug_unlock(hp);
- /* protected by cpu_hotplug.lock */
- hp->grab_lock = 0;
- }
- tell_sched_cpu_down_done(cpu);
-}
-
void get_online_cpus(void)
{
might_sleep();
@@ -363,14 +79,15 @@ void put_online_cpus(void)
{
if (cpu_hotplug.active_writer == current)
return;
-
mutex_lock(&cpu_hotplug.lock);
+
if (WARN_ON(!cpu_hotplug.refcount))
cpu_hotplug.refcount++; /* try to fix things up */
if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
wake_up_process(cpu_hotplug.active_writer);
mutex_unlock(&cpu_hotplug.lock);
+
}
EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -565,15 +282,13 @@ static int __ref take_cpu_down(void *_param)
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
- int mycpu, err, nr_calls = 0;
+ int err, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
.mod = mod,
.hcpu = hcpu,
};
- cpumask_var_t cpumask;
- cpumask_var_t cpumask_org;
if (num_online_cpus() == 1)
return -EBUSY;
@@ -581,34 +296,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
if (!cpu_online(cpu))
return -EINVAL;
- /* Move the downtaker off the unplug cpu */
- if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
- return -ENOMEM;
- if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
- free_cpumask_var(cpumask);
- return -ENOMEM;
- }
-
- cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
- cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
- set_cpus_allowed_ptr(current, cpumask);
- free_cpumask_var(cpumask);
- migrate_disable();
- mycpu = smp_processor_id();
- if (mycpu == cpu) {
- printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
- migrate_enable();
- err = -EBUSY;
- goto restore_cpus;
- }
- migrate_enable();
-
cpu_hotplug_begin();
- err = cpu_unplug_begin(cpu);
- if (err) {
- printk("cpu_unplug_begin(%d) failed\n", cpu);
- goto out_cancel;
- }
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
@@ -618,13 +306,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
__func__, cpu);
goto out_release;
}
-
- __cpu_unplug_wait(cpu);
smpboot_park_threads(cpu);
- /* Notifiers are done. Don't let any more tasks pin this CPU. */
- cpu_unplug_sync(cpu);
-
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
/* CPU didn't die: tell everyone. Can't complain. */
@@ -653,14 +336,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
check_for_tasks(cpu);
out_release:
- cpu_unplug_done(cpu);
-out_cancel:
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
-restore_cpus:
- set_cpus_allowed_ptr(current, cpumask_org);
- free_cpumask_var(cpumask_org);
return err;
}
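Note for readers of this revert: the kernel/cpu.c hunks strip the RT hotplug machinery and leave the stock reader gate in place — get_online_cpus() bumps a refcount under cpu_hotplug.lock, and the last put_online_cpus() wakes a blocked hotplug writer. A minimal user-space sketch of that gate, using pthreads and hypothetical names (hotplug_gate, gate_get, gate_put are illustrative, not kernel symbols):

#include <pthread.h>

struct hotplug_gate {
	pthread_mutex_t lock;
	pthread_cond_t  writer_wait;
	int             refcount;        /* active readers */
	int             writer_waiting;  /* a hotplug "writer" is blocked */
};

static void gate_get(struct hotplug_gate *g)  /* get_online_cpus() analogue */
{
	pthread_mutex_lock(&g->lock);
	g->refcount++;
	pthread_mutex_unlock(&g->lock);
}

static void gate_put(struct hotplug_gate *g)  /* put_online_cpus() analogue */
{
	pthread_mutex_lock(&g->lock);
	if (!--g->refcount && g->writer_waiting)
		pthread_cond_signal(&g->writer_wait); /* wake_up_process() analogue */
	pthread_mutex_unlock(&g->lock);
}

The writer side (not shown) would set writer_waiting under the same mutex and wait on the condvar until refcount reaches zero.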
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 399dba6..14ff484 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -554,6 +554,7 @@ int vkdb_printf(const char *fmt, va_list ap)
int linecount;
int colcount;
int logging, saved_loglevel = 0;
+ int saved_trap_printk;
int got_printf_lock = 0;
int retlen = 0;
int fnd, len;
@@ -564,6 +565,8 @@ int vkdb_printf(const char *fmt, va_list ap)
unsigned long uninitialized_var(flags);
preempt_disable();
+ saved_trap_printk = kdb_trap_printk;
+ kdb_trap_printk = 0;
/* Serialize kdb_printf if multiple cpus try to write at once.
* But if any cpu goes recursive in kdb, just print the output,
@@ -830,6 +833,7 @@ kdb_print_out:
} else {
__release(kdb_printf_lock);
}
+ kdb_trap_printk = saved_trap_printk;
preempt_enable();
return retlen;
}
@@ -839,11 +843,9 @@ int kdb_printf(const char *fmt, ...)
va_list ap;
int r;
- kdb_trap_printk++;
va_start(ap, fmt);
r = vkdb_printf(fmt, ap);
va_end(ap);
- kdb_trap_printk--;
return r;
}
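The kdb hunk moves the recursion protection from the kdb_printf() wrapper (a ++/-- pair around the call) into vkdb_printf() itself, which saves, zeroes, and restores kdb_trap_printk so that every caller is covered. A compilable sketch of the save/restore idiom with a stand-in flag (emit and trap_flag are illustrative, not kdb symbols):

#include <stdarg.h>
#include <stdio.h>

static int trap_flag;                 /* stand-in for kdb_trap_printk */

static int emit(const char *fmt, ...)
{
	int saved = trap_flag;        /* remember the caller's setting */
	va_list ap;
	int ret;

	trap_flag = 0;                /* don't trap our own output */
	va_start(ap, fmt);
	ret = vprintf(fmt, ap);
	va_end(ap);
	trap_flag = saved;            /* restore on the way out */
	return ret;
}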
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 420de7f..fea4f6c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6029,7 +6029,6 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
- hwc->hrtimer.irqsafe = 1;
/*
* Since hrtimers have a fixed rate, we can do a static freq->period
diff --git a/kernel/exit.c b/kernel/exit.c
index 7493b32..dcde2c4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -145,7 +145,7 @@ static void __exit_signal(struct task_struct *tsk)
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
- flush_task_sigqueue(tsk);
+ flush_sigqueue(&tsk->pending);
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
@@ -559,9 +559,6 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
struct list_head *dead)
{
list_move_tail(&p->sibling, &p->real_parent->children);
-
- if (p->exit_state == EXIT_DEAD)
- return;
/*
* If this is a threaded reparent there is no need to
* notify anyone anything has happened.
@@ -569,9 +566,19 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
if (same_thread_group(p->real_parent, father))
return;
- /* We don't want people slaying init. */
+ /*
+ * We don't want people slaying init.
+ *
+ * Note: we do this even if it is EXIT_DEAD, wait_task_zombie()
+ * can change ->exit_state to EXIT_ZOMBIE. If this is the final
+ * state, do_notify_parent() was already called and ->exit_signal
+ * doesn't matter.
+ */
p->exit_signal = SIGCHLD;
+ if (p->exit_state == EXIT_DEAD)
+ return;
+
/* If it has exited notify the new parent about this child's death. */
if (!p->ptrace &&
p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
@@ -783,6 +790,8 @@ void do_exit(long code)
exit_shm(tsk);
exit_files(tsk);
exit_fs(tsk);
+ if (group_dead)
+ disassociate_ctty(1);
exit_task_namespaces(tsk);
exit_task_work(tsk);
check_stack_usage();
@@ -798,13 +807,9 @@ void do_exit(long code)
cgroup_exit(tsk, 1);
- if (group_dead)
- disassociate_ctty(1);
-
module_put(task_thread_info(tsk)->exec_domain->module);
proc_exit_connector(tsk);
-
/*
* FIXME: do that only when needed, using sched_exit tracepoint
*/
diff --git a/kernel/fork.c b/kernel/fork.c
index ae9a1a4..458953c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -94,7 +94,7 @@ int max_threads; /* tunable limit on nr_threads */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
-DEFINE_RWLOCK(tasklist_lock); /* outer */
+__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
@@ -230,9 +230,7 @@ static inline void put_signal_struct(struct signal_struct *sig)
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
-#ifdef CONFIG_PREEMPT_RT_BASE
-static
-#endif
+
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
@@ -247,18 +245,7 @@ void __put_task_struct(struct task_struct *tsk)
if (!profile_handoff_task(tsk))
free_task(tsk);
}
-#ifndef CONFIG_PREEMPT_RT_BASE
EXPORT_SYMBOL_GPL(__put_task_struct);
-#else
-void __put_task_struct_cb(struct rcu_head *rhp)
-{
- struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
-
- __put_task_struct(tsk);
-
-}
-EXPORT_SYMBOL_GPL(__put_task_struct_cb);
-#endif
void __init __weak arch_task_cache_init(void) { }
@@ -611,19 +598,6 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
-#ifdef CONFIG_PREEMPT_RT_BASE
-/*
- * RCU callback for delayed mm drop. Not strictly rcu, but we don't
- * want another facility to make this work.
- */
-void __mmdrop_delayed(struct rcu_head *rhp)
-{
- struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
-
- __mmdrop(mm);
-}
-#endif
-
/*
* Decrement the use count and release all resources for an mm.
*/
@@ -1133,9 +1107,6 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
-#ifdef CONFIG_PREEMPT_RT_BASE
- tsk->posix_timer_list = NULL;
-#endif
tsk->cputime_expires.prof_exp = 0;
tsk->cputime_expires.virt_exp = 0;
tsk->cputime_expires.sched_exp = 0;
@@ -1264,7 +1235,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
- p->sigqueue_cache = NULL;
p->utime = p->stime = p->gtime = 0;
p->utimescaled = p->stimescaled = 0;
@@ -1272,8 +1242,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->prev_cputime.utime = p->prev_cputime.stime = 0;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- raw_spin_lock_init(&p->vtime_lock);
- seqcount_init(&p->vtime_seq);
+ seqlock_init(&p->vtime_seqlock);
p->vtime_snap = 0;
p->vtime_snap_whence = VTIME_SLEEPING;
#endif
@@ -1326,9 +1295,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
- p->pagefault_disabled = 0;
-#endif
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
@@ -1690,7 +1656,7 @@ SYSCALL_DEFINE0(fork)
#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
0, NULL, NULL);
}
#endif
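The copy_process() hunk above replaces a raw spinlock plus seqcount pair with a single seqlock_t for the vtime fields; a seqlock is exactly that combination packaged together. A minimal single-writer sketch in C11 atomics — illustrative only, and it assumes external writer exclusion, which the kernel's seqlock_t provides via its embedded spinlock:

#include <stdatomic.h>

struct seq_demo {
	atomic_uint        seq;  /* even = stable, odd = write in progress */
	unsigned long long snap; /* protected data, e.g. a vtime snapshot */
};

static void seq_write(struct seq_demo *s, unsigned long long v)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel); /* -> odd */
	s->snap = v;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* -> even */
}

static unsigned long long seq_read(struct seq_demo *s)
{
	unsigned int start;
	unsigned long long v;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		v = s->snap;             /* may tear; the retry loop fixes it */
	} while ((start & 1) ||
		 atomic_load_explicit(&s->seq, memory_order_acquire) != start);
	return v;
}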
diff --git a/kernel/futex.c b/kernel/futex.c
index 3b85a95..d8347b7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -68,7 +68,9 @@
#include "rtmutex_common.h"
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
+#endif
#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
@@ -573,9 +575,7 @@ void exit_pi_state_list(struct task_struct *curr)
* task still owns the PI-state:
*/
if (head->next != next) {
- raw_spin_unlock_irq(&curr->pi_lock);
spin_unlock(&hb->lock);
- raw_spin_lock_irq(&curr->pi_lock);
continue;
}
@@ -1449,16 +1449,6 @@ retry_private:
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
- } else if (ret == -EAGAIN) {
- /*
- * Waiter was woken by timeout or
- * signal and has set pi_blocked_on to
- * PI_WAKEUP_INPROGRESS before we
- * tried to enqueue it on the rtmutex.
- */
- this->pi_state = NULL;
- free_pi_state(pi_state);
- continue;
} else if (ret) {
/* -EDEADLK */
this->pi_state = NULL;
@@ -2302,7 +2292,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
- struct futex_hash_bucket *hb, *hb2;
+ struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
@@ -2327,7 +2317,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
- rt_mutex_init_waiter(&rt_waiter, false);
+ debug_rt_mutex_init_waiter(&rt_waiter);
+ rt_waiter.task = NULL;
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
@@ -2348,55 +2339,20 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
- /*
- * On RT we must avoid races with requeue and trying to block
- * on two mutexes (hb->lock and uaddr2's rtmutex) by
- * serializing access to pi_blocked_on with pi_lock.
- */
- raw_spin_lock_irq(&current->pi_lock);
- if (current->pi_blocked_on) {
- /*
- * We have been requeued or are in the process of
- * being requeued.
- */
- raw_spin_unlock_irq(&current->pi_lock);
- } else {
- /*
- * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
- * prevents a concurrent requeue from moving us to the
- * uaddr2 rtmutex. After that we can safely acquire
- * (and possibly block on) hb->lock.
- */
- current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
- raw_spin_unlock_irq(&current->pi_lock);
-
- spin_lock(&hb->lock);
-
- /*
- * Clean up pi_blocked_on. We might leak it otherwise
- * when we succeeded with the hb->lock in the fast
- * path.
- */
- raw_spin_lock_irq(&current->pi_lock);
- current->pi_blocked_on = NULL;
- raw_spin_unlock_irq(&current->pi_lock);
-
- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
- spin_unlock(&hb->lock);
- if (ret)
- goto out_put_keys;
- }
+ spin_lock(&hb->lock);
+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+ spin_unlock(&hb->lock);
+ if (ret)
+ goto out_put_keys;
/*
- * In order to be here, we have either been requeued, are in
- * the process of being requeued, or requeue successfully
- * acquired uaddr2 on our behalf. If pi_blocked_on was
- * non-null above, we may be racing with a requeue. Do not
- * rely on q->lock_ptr to be hb2->lock until after blocking on
- * hb->lock or hb2->lock. The futex_requeue dropped our key1
- * reference and incremented our key2 reference count.
+ * In order for us to be here, we know our q.key == key2, and since
+ * we took the hb->lock above, we also know that futex_requeue() has
+ * completed and we no longer have to concern ourselves with a wakeup
+ * race with the atomic proxy lock acquisition by the requeue code. The
+ * futex_requeue dropped our key1 reference and incremented our key2
+ * reference count.
*/
- hb2 = hash_futex(&key2);
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
@@ -2405,10 +2361,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
- spin_lock(&hb2->lock);
- BUG_ON(&hb2->lock != q.lock_ptr);
+ spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
- spin_unlock(&hb2->lock);
+ spin_unlock(q.lock_ptr);
}
} else {
/*
@@ -2421,8 +2376,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
- spin_lock(&hb2->lock);
- BUG_ON(&hb2->lock != q.lock_ptr);
+ spin_lock(q.lock_ptr);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
@@ -2779,10 +2733,10 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
-static int __init futex_init(void)
+static void __init futex_detect_cmpxchg(void)
{
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
u32 curval;
- int i;
/*
* This will fail and we want it. Some arch implementations do
@@ -2796,6 +2750,14 @@ static int __init futex_init(void)
*/
if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
futex_cmpxchg_enabled = 1;
+#endif
+}
+
+static int __init futex_init(void)
+{
+ int i;
+
+ futex_detect_cmpxchg();
for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
plist_head_init(&futex_queues[i].chain);
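Taken together, the futex.c hunks split the cmpxchg probe out of futex_init() and compile it (and the futex_cmpxchg_enabled flag) only when the architecture has not declared CONFIG_HAVE_FUTEX_CMPXCHG. The probe deliberately targets a NULL user address: -EFAULT proves the operation is implemented, while -ENOSYS would mean it is not. Condensed from the hunks above:

#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif

static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
	u32 curval;

	/* Expected to fault; -EFAULT (not -ENOSYS) means the op exists. */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}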
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c19183d..383319b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -47,13 +47,11 @@
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/timer.h>
-#include <linux/kthread.h>
#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <trace/events/timer.h>
-#include <trace/events/hist.h>
/*
* The timer bases:
@@ -609,7 +607,8 @@ static int hrtimer_reprogram(struct hrtimer *timer,
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
* the callback is executed in the hrtimer_interrupt context. The
- * reprogramming is handled at the end of the hrtimer_interrupt.
+ * reprogramming is handled either by the softirq that called the
+ * callback, or at the end of the hrtimer_interrupt.
*/
if (hrtimer_callback_running(timer))
return 0;
@@ -644,9 +643,6 @@ static int hrtimer_reprogram(struct hrtimer *timer,
return res;
}
-static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
-static int hrtimer_rt_defer(struct hrtimer *timer);
-
/*
* Initialize the high resolution related parts of cpu_base
*/
@@ -663,18 +659,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
* and expiry check is done in the hrtimer_interrupt or in the softirq.
*/
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base,
- int wakeup)
+ struct hrtimer_clock_base *base)
{
- if (!(base->cpu_base->hres_active && hrtimer_reprogram(timer, base)))
- return 0;
- if (!wakeup)
- return -ETIME;
-#ifdef CONFIG_PREEMPT_RT_BASE
- if (!hrtimer_rt_defer(timer))
- return -ETIME;
-#endif
- return 1;
+ return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
}
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
@@ -742,44 +729,6 @@ static void clock_was_set_work(struct work_struct *work)
static DECLARE_WORK(hrtimer_work, clock_was_set_work);
-#ifdef CONFIG_PREEMPT_RT_FULL
-/*
- * RT can not call schedule_work from real interrupt context.
- * Need to make a thread to do the real work.
- */
-static struct task_struct *clock_set_delay_thread;
-static bool do_clock_set_delay;
-
-static int run_clock_set_delay(void *ignore)
-{
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (do_clock_set_delay) {
- do_clock_set_delay = false;
- schedule_work(&hrtimer_work);
- }
- schedule();
- }
- __set_current_state(TASK_RUNNING);
- return 0;
-}
-
-void clock_was_set_delayed(void)
-{
- do_clock_set_delay = true;
- /* Make visible before waking up process */
- smp_wmb();
- wake_up_process(clock_set_delay_thread);
-}
-
-static __init int create_clock_set_delay_thread(void)
-{
- clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd");
- BUG_ON(!clock_set_delay_thread);
- return 0;
-}
-early_initcall(create_clock_set_delay_thread);
-#else /* PREEMPT_RT_FULL */
/*
* Called from timekeeping and resume code to reprogram the hrtimer
* interrupt device on all cpus.
@@ -788,7 +737,6 @@ void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
-#endif
#else
@@ -798,18 +746,12 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base,
- int wakeup)
+ struct hrtimer_clock_base *base)
{
return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
-static inline int hrtimer_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
-{
- return 0;
-}
#endif /* CONFIG_HIGH_RES_TIMERS */
@@ -928,32 +870,6 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
-#ifdef CONFIG_PREEMPT_RT_BASE
-# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
-
-/**
- * hrtimer_wait_for_timer - Wait for a running timer
- *
- * @timer: timer to wait for
- *
- * The function waits in case the timers callback function is
- * currently executed on the waitqueue of the timer base. The
- * waitqueue is woken up after the timer callback function has
- * finished execution.
- */
-void hrtimer_wait_for_timer(const struct hrtimer *timer)
-{
- struct hrtimer_clock_base *base = timer->base;
-
- if (base && base->cpu_base && !timer->irqsafe)
- wait_event(base->cpu_base->wait,
- !(timer->state & HRTIMER_STATE_CALLBACK));
-}
-
-#else
-# define wake_up_timer_waiters(b) do { } while (0)
-#endif
-
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
@@ -997,11 +913,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
- if (unlikely(!list_empty(&timer->cb_entry))) {
- list_del_init(&timer->cb_entry);
- goto out;
- }
-
next_timer = timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
if (&timer->node == next_timer) {
@@ -1086,17 +997,6 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
#endif
}
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- {
- ktime_t now = new_base->get_time();
-
- if (ktime_to_ns(tim) < ktime_to_ns(now))
- timer->praecox = now;
- else
- timer->praecox = ktime_set(0, 0);
- }
-#endif
-
hrtimer_set_expires_range_ns(timer, tim, delta_ns);
timer_stats_hrtimer_set_start_info(timer);
@@ -1109,19 +1009,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
*
* XXX send_remote_softirq() ?
*/
- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) {
- ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
- if (ret < 0) {
- /*
- * In case we failed to reprogram the timer (mostly
- * because our current timer has already elapsed),
- * remove it again and report a failure. This avoids
- * stale base->first entries.
- */
- debug_deactivate(timer);
- __remove_hrtimer(timer, new_base,
- timer->state & HRTIMER_STATE_CALLBACK, 0);
- } else if (ret > 0) {
+ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
+ && hrtimer_enqueue_reprogram(timer, new_base)) {
+ if (wakeup) {
/*
* We need to drop cpu_base->lock to avoid a
* lock ordering issue vs. rq->lock.
@@ -1129,7 +1019,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
raw_spin_unlock(&new_base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
local_irq_restore(flags);
- return 0;
+ return ret;
+ } else {
+ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
}
@@ -1219,7 +1111,7 @@ int hrtimer_cancel(struct hrtimer *timer)
if (ret >= 0)
return ret;
- hrtimer_wait_for_timer(timer);
+ cpu_relax();
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
@@ -1298,7 +1190,6 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
- INIT_LIST_HEAD(&timer->cb_entry);
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
@@ -1382,126 +1273,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
-static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
-
-#ifdef CONFIG_PREEMPT_RT_BASE
-static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
- struct hrtimer_clock_base *base)
-{
- /*
- * Note, we clear the callback flag before we requeue the
- * timer, otherwise we trigger the callback_running() check
- * in hrtimer_reprogram().
- */
- timer->state &= ~HRTIMER_STATE_CALLBACK;
-
- if (restart != HRTIMER_NORESTART) {
- BUG_ON(hrtimer_active(timer));
- /*
- * Enqueue the timer, if it's the leftmost timer then
- * we need to reprogram it.
- */
- if (!enqueue_hrtimer(timer, base))
- return;
-
-#ifndef CONFIG_HIGH_RES_TIMERS
- }
-#else
- if (base->cpu_base->hres_active &&
- hrtimer_reprogram(timer, base))
- goto requeue;
-
- } else if (hrtimer_active(timer)) {
- /*
- * If the timer was rearmed on another CPU, reprogram
- * the event device.
- */
- if (&timer->node == base->active.next &&
- base->cpu_base->hres_active &&
- hrtimer_reprogram(timer, base))
- goto requeue;
- }
- return;
-
-requeue:
- /*
- * Timer is expired. Thus move it from tree to pending list
- * again.
- */
- __remove_hrtimer(timer, base, timer->state, 0);
- list_add_tail(&timer->cb_entry, &base->expired);
-#endif
-}
-
-/*
- * The changes in mainline which removed the callback modes from
- * hrtimer are not yet working with -rt. The non wakeup_process()
- * based callbacks which involve sleeping locks need to be treated
- * separately.
- */
-static void hrtimer_rt_run_pending(void)
-{
- enum hrtimer_restart (*fn)(struct hrtimer *);
- struct hrtimer_cpu_base *cpu_base;
- struct hrtimer_clock_base *base;
- struct hrtimer *timer;
- int index, restart;
-
- local_irq_disable();
- cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
-
- raw_spin_lock(&cpu_base->lock);
-
- for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
- base = &cpu_base->clock_base[index];
-
- while (!list_empty(&base->expired)) {
- timer = list_first_entry(&base->expired,
- struct hrtimer, cb_entry);
-
- /*
- * Same as the above __run_hrtimer function,
- * except that we run with interrupts enabled.
- */
- debug_hrtimer_deactivate(timer);
- __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
- timer_stats_account_hrtimer(timer);
- fn = timer->function;
-
- raw_spin_unlock_irq(&cpu_base->lock);
- restart = fn(timer);
- raw_spin_lock_irq(&cpu_base->lock);
-
- hrtimer_rt_reprogram(restart, timer, base);
- }
- }
-
- raw_spin_unlock_irq(&cpu_base->lock);
-
- wake_up_timer_waiters(cpu_base);
-}
-
-static int hrtimer_rt_defer(struct hrtimer *timer)
-{
- if (timer->irqsafe)
- return 0;
-
- __remove_hrtimer(timer, timer->base, timer->state, 0);
- list_add_tail(&timer->cb_entry, &timer->base->expired);
- return 1;
-}
-
-#else
-
-static inline void hrtimer_rt_run_pending(void)
-{
- hrtimer_peek_ahead_timers();
-}
-
-static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
-
-#endif
-
#ifdef CONFIG_HIGH_RES_TIMERS
/*
@@ -1512,7 +1283,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
- int i, retries = 0, raise = 0;
+ int i, retries = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
@@ -1547,15 +1318,6 @@ retry:
timer = container_of(node, struct hrtimer, node);
- trace_hrtimer_interrupt(raw_smp_processor_id(),
- ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
- timer->praecox : hrtimer_get_expires(timer),
- basenow)),
- current,
- timer->function == hrtimer_wakeup ?
- container_of(timer, struct hrtimer_sleeper,
- timer)->task : NULL);
-
/*
* The immediate goal for using the softexpires is
* minimizing wakeups, not running timers at the
@@ -1581,10 +1343,7 @@ retry:
break;
}
- if (!hrtimer_rt_defer(timer))
- __run_hrtimer(timer, &basenow);
- else
- raise = 1;
+ __run_hrtimer(timer, &basenow);
}
}
@@ -1599,7 +1358,7 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
- goto out;
+ return;
}
/*
@@ -1643,9 +1402,6 @@ retry:
tick_program_event(expires_next, 1);
printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
ktime_to_ns(delta));
-out:
- if (raise)
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
/*
@@ -1681,16 +1437,40 @@ void hrtimer_peek_ahead_timers(void)
__hrtimer_peek_ahead_timers();
local_irq_restore(flags);
}
+
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+ hrtimer_peek_ahead_timers();
+}
+
#else /* CONFIG_HIGH_RES_TIMERS */
static inline void __hrtimer_peek_ahead_timers(void) { }
#endif /* !CONFIG_HIGH_RES_TIMERS */
-
-static void run_hrtimer_softirq(struct softirq_action *h)
+/*
+ * Called from timer softirq every jiffy, expire hrtimers:
+ *
+ * For HRT it's the fallback code to run the softirq in the timer
+ * softirq context in case the hrtimer initialization failed or has
+ * not been done yet.
+ */
+void hrtimer_run_pending(void)
{
- hrtimer_rt_run_pending();
+ if (hrtimer_hres_active())
+ return;
+
+ /*
+ * This _is_ ugly: we have to check in the softirq context
+ * whether we can switch to highres and/or nohz mode. The
+ * clocksource switch happens in the timer interrupt with
+ * xtime_lock held. Notification from there only sets the
+ * check bit in the tick_oneshot code, otherwise we might
+ * deadlock vs. xtime_lock.
+ */
+ if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
+ hrtimer_switch_to_hres();
}
/*
@@ -1701,18 +1481,11 @@ void hrtimer_run_queues(void)
struct timerqueue_node *node;
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
- int index, gettime = 1, raise = 0;
+ int index, gettime = 1;
if (hrtimer_hres_active())
return;
- /*
- * Check whether we can switch to highres mode.
- */
- if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())
- && hrtimer_switch_to_hres())
- return;
-
for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
base = &cpu_base->clock_base[index];
if (!timerqueue_getnext(&base->active))
@@ -1733,16 +1506,10 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
- if (!hrtimer_rt_defer(timer))
- __run_hrtimer(timer, &base->softirq_time);
- else
- raise = 1;
+ __run_hrtimer(timer, &base->softirq_time);
}
raw_spin_unlock(&cpu_base->lock);
}
-
- if (raise)
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
/*
@@ -1764,18 +1531,16 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
- sl->timer.irqsafe = 1;
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
- unsigned long state)
+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
hrtimer_init_sleeper(t, current);
do {
- set_current_state(state);
+ set_current_state(TASK_INTERRUPTIBLE);
hrtimer_start_expires(&t->timer, mode);
if (!hrtimer_active(&t->timer))
t->task = NULL;
@@ -1819,8 +1584,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
- /* cpu_chill() does not care about restart state. */
- if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
+ if (do_nanosleep(&t, HRTIMER_MODE_ABS))
goto out;
rmtp = restart->nanosleep.rmtp;
@@ -1837,10 +1601,8 @@ out:
return ret;
}
-static long
-__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
- const enum hrtimer_mode mode, const clockid_t clockid,
- unsigned long state)
+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+ const enum hrtimer_mode mode, const clockid_t clockid)
{
struct restart_block *restart;
struct hrtimer_sleeper t;
@@ -1853,7 +1615,7 @@ __hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
hrtimer_init_on_stack(&t.timer, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
- if (do_nanosleep(&t, mode, state))
+ if (do_nanosleep(&t, mode))
goto out;
/* Absolute timers do not update the rmtp value and restart: */
@@ -1880,12 +1642,6 @@ out:
return ret;
}
-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
- const enum hrtimer_mode mode, const clockid_t clockid)
-{
- return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
-}
-
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
@@ -1900,26 +1656,6 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-/*
- * Sleep for 1 ms in the hope that whoever holds what we want will let it go.
- */
-void cpu_chill(void)
-{
- struct timespec tu = {
- .tv_nsec = NSEC_PER_MSEC,
- };
- unsigned int freeze_flag = current->flags & PF_NOFREEZE;
-
- current->flags |= PF_NOFREEZE;
- __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
- TASK_UNINTERRUPTIBLE);
- if (!freeze_flag)
- current->flags &= ~PF_NOFREEZE;
-}
-EXPORT_SYMBOL(cpu_chill);
-#endif
-
/*
* Functions related to boot-time initialization:
*/
@@ -1931,13 +1667,9 @@ static void init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
- INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
}
hrtimer_init_hres(cpu_base);
-#ifdef CONFIG_PREEMPT_RT_BASE
- init_waitqueue_head(&cpu_base->wait);
-#endif
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -2050,7 +1782,9 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
+#ifdef CONFIG_HIGH_RES_TIMERS
open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+#endif
}
/**
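With the RT waitqueue gone, hrtimer_cancel() reverts to the stock busy-wait: hrtimer_try_to_cancel() returns -1 for as long as the callback is executing, so the caller spins with cpu_relax() until the timer can be removed. The loop, condensed from the hunk above into a stand-alone form (my_hrtimer_cancel is an illustrative name):

int my_hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;  /* 0: was not active, 1: dequeued */
		cpu_relax();         /* callback running on another CPU */
	}
}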
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 7f50c55..131ca17 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -132,8 +132,6 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
- struct pt_regs *regs = get_irq_regs();
- u64 ip = regs ? instruction_pointer(regs) : 0;
irqreturn_t retval = IRQ_NONE;
unsigned int flags = 0, irq = desc->irq_data.irq;
@@ -174,11 +172,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
action = action->next;
} while (action);
-#ifndef CONFIG_PREEMPT_RT_FULL
- add_interrupt_randomness(irq, flags, ip);
-#else
- desc->random_ip = ip;
-#endif
+ add_interrupt_randomness(irq, flags);
if (!noirqdebug)
note_interrupt(irq, desc, retval);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 252bf10..4c84746 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -22,7 +22,6 @@
#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
-# ifndef CONFIG_PREEMPT_RT_BASE
__read_mostly bool force_irqthreads;
static int __init setup_forced_irqthreads(char *arg)
@@ -31,7 +30,6 @@ static int __init setup_forced_irqthreads(char *arg)
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
-# endif
#endif
/**
@@ -164,62 +162,6 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
return ret;
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-static void _irq_affinity_notify(struct irq_affinity_notify *notify);
-static struct task_struct *set_affinity_helper;
-static LIST_HEAD(affinity_list);
-static DEFINE_RAW_SPINLOCK(affinity_list_lock);
-
-static int set_affinity_thread(void *unused)
-{
- while (1) {
- struct irq_affinity_notify *notify;
- int empty;
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- raw_spin_lock_irq(&affinity_list_lock);
- empty = list_empty(&affinity_list);
- raw_spin_unlock_irq(&affinity_list_lock);
-
- if (empty)
- schedule();
- if (kthread_should_stop())
- break;
- set_current_state(TASK_RUNNING);
-try_next:
- notify = NULL;
-
- raw_spin_lock_irq(&affinity_list_lock);
- if (!list_empty(&affinity_list)) {
- notify = list_first_entry(&affinity_list,
- struct irq_affinity_notify, list);
- list_del_init(&notify->list);
- }
- raw_spin_unlock_irq(&affinity_list_lock);
-
- if (!notify)
- continue;
- _irq_affinity_notify(notify);
- goto try_next;
- }
- return 0;
-}
-
-static void init_helper_thread(void)
-{
- if (set_affinity_helper)
- return;
- set_affinity_helper = kthread_run(set_affinity_thread, NULL,
- "affinity-cb");
- WARN_ON(IS_ERR(set_affinity_helper));
-}
-#else
-
-static inline void init_helper_thread(void) { }
-
-#endif
-
int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -238,17 +180,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
-
-#ifdef CONFIG_PREEMPT_RT_FULL
- raw_spin_lock(&affinity_list_lock);
- if (list_empty(&desc->affinity_notify->list))
- list_add_tail(&affinity_list,
- &desc->affinity_notify->list);
- raw_spin_unlock(&affinity_list_lock);
- wake_up_process(set_affinity_helper);
-#else
schedule_work(&desc->affinity_notify->work);
-#endif
}
irqd_set(data, IRQD_AFFINITY_SET);
@@ -289,8 +221,10 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
-static void _irq_affinity_notify(struct irq_affinity_notify *notify)
+static void irq_affinity_notify(struct work_struct *work)
{
+ struct irq_affinity_notify *notify =
+ container_of(work, struct irq_affinity_notify, work);
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
@@ -312,13 +246,6 @@ out:
kref_put(&notify->kref, notify->release);
}
-static void irq_affinity_notify(struct work_struct *work)
-{
- struct irq_affinity_notify *notify =
- container_of(work, struct irq_affinity_notify, work);
- _irq_affinity_notify(notify);
-}
-
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
@@ -348,8 +275,6 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
notify->irq = irq;
kref_init(&notify->kref);
INIT_WORK(&notify->work, irq_affinity_notify);
- INIT_LIST_HEAD(&notify->list);
- init_helper_thread();
}
raw_spin_lock_irqsave(&desc->lock, flags);
@@ -856,15 +781,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
- /*
- * Interrupts which have real time requirements can be set up
- * to avoid softirq processing in the thread handler. This is
- * safe as these interrupts do not raise soft interrupts.
- */
- if (irq_settings_no_softirq_call(desc))
- _local_bh_enable();
- else
- local_bh_enable();
+ local_bh_enable();
return ret;
}
@@ -947,12 +864,6 @@ static int irq_thread(void *data)
if (!noirqdebug)
note_interrupt(action->irq, desc, action_ret);
-#ifdef CONFIG_PREEMPT_RT_FULL
- migrate_disable();
- add_interrupt_randomness(action->irq, 0,
- desc->random_ip ^ (unsigned long) action);
- migrate_enable();
-#endif
wake_threads_waitq(desc);
}
@@ -1215,9 +1126,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
- if (new->flags & IRQF_NO_SOFTIRQ_CALL)
- irq_settings_set_no_softirq_call(desc);
-
/* Set default affinity mask once everything is setup */
setup_affinity(irq, desc, mask);
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 0d2c381..1162f10 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -14,7 +14,6 @@ enum {
_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
- _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
@@ -27,7 +26,6 @@ enum {
#define IRQ_NOAUTOEN GOT_YOU_MORON
#define IRQ_NESTED_THREAD GOT_YOU_MORON
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
-#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
@@ -38,16 +36,6 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}
-static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
-{
- return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
-}
-
-static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
-{
- desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
-}
-
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_PER_CPU;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index e5a309a..7b5f012 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -340,10 +340,6 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
static int __init irqfixup_setup(char *str)
{
-#ifdef CONFIG_PREEMPT_RT_BASE
- pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
- return 1;
-#endif
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
@@ -356,10 +352,6 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
-#ifdef CONFIG_PREEMPT_RT_BASE
- pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
- return 1;
-#endif
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 35d21f9..55fcce6 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -20,9 +20,6 @@
static DEFINE_PER_CPU(struct llist_head, irq_work_list);
-#ifdef CONFIG_PREEMPT_RT_FULL
-static DEFINE_PER_CPU(struct llist_head, hirq_work_list);
-#endif
static DEFINE_PER_CPU(int, irq_work_raised);
/*
@@ -51,11 +48,7 @@ static bool irq_work_claim(struct irq_work *work)
return true;
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-void arch_irq_work_raise(void)
-#else
void __weak arch_irq_work_raise(void)
-#endif
{
/*
* Lame architectures will get the timer tick callback
@@ -77,12 +70,8 @@ void irq_work_queue(struct irq_work *work)
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (work->flags & IRQ_WORK_HARD_IRQ)
- llist_add(&work->llnode, &__get_cpu_var(hirq_work_list));
- else
-#endif
- llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
+ llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
+
/*
* If the work is not "lazy" or the tick is stopped, raise the irq
* work interrupt (if supported by the arch), otherwise, just wait
@@ -126,18 +115,12 @@ static void __irq_work_run(void)
__this_cpu_write(irq_work_raised, 0);
barrier();
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (in_irq())
- this_list = &__get_cpu_var(hirq_work_list);
- else
-#endif
- this_list = &__get_cpu_var(irq_work_list);
+ this_list = &__get_cpu_var(irq_work_list);
if (llist_empty(this_list))
return;
-#ifndef CONFIG_PREEMPT_RT_FULL
BUG_ON(!irqs_disabled());
-#endif
+
llnode = llist_del_all(this_list);
while (llnode != NULL) {
work = llist_entry(llnode, struct irq_work, llnode);
@@ -169,9 +152,7 @@ static void __irq_work_run(void)
*/
void irq_work_run(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
BUG_ON(!in_irq());
-#endif
__irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);
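After the RT split is removed, irq_work is back to a single lock-free per-CPU queue: producers push with llist_add(), and the consumer in IRQ context takes the whole list at once with llist_del_all(). A simplified sketch of that shape — it omits the claim/flags handling the real code does, so treat it as illustrative:

static DEFINE_PER_CPU(struct llist_head, work_list);

void queue_work_item(struct irq_work *work)
{
	preempt_disable();
	llist_add(&work->llnode, this_cpu_ptr(&work_list));
	arch_irq_work_raise();              /* kick the IPI/tick path */
	preempt_enable();
}

static void run_work_items(void)            /* runs in hard IRQ context */
{
	struct llist_node *node = llist_del_all(this_cpu_ptr(&work_list));

	while (node) {
		struct irq_work *work = llist_entry(node, struct irq_work, llnode);

		node = llist_next(node);    /* fetch before func() can refire */
		work->func(work);
	}
}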
diff --git a/kernel/itimer.c b/kernel/itimer.c
index d051390..8d262b4 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -213,7 +213,6 @@ again:
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
spin_unlock_irq(&tsk->sighand->siglock);
- hrtimer_wait_for_timer(&tsk->signal->real_timer);
goto again;
}
expires = timeval_to_ktime(value->it_value);
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index b66ab9e..9659d38 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -132,15 +132,6 @@ KERNEL_ATTR_RO(vmcoreinfo);
#endif /* CONFIG_KEXEC */
-#if defined(CONFIG_PREEMPT_RT_FULL)
-static ssize_t realtime_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", 1);
-}
-KERNEL_ATTR_RO(realtime);
-#endif
-
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -206,9 +197,6 @@ static struct attribute * kernel_attrs[] = {
&vmcoreinfo_attr.attr,
#endif
&rcu_expedited_attr.attr,
-#ifdef CONFIG_PREEMPT_RT_FULL
- &realtime_attr.attr,
-#endif
NULL
};
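The dropped realtime file also documents the standard recipe for a read-only /sys/kernel attribute: a show() callback, the KERNEL_ATTR_RO() wrapper, and an entry in kernel_attrs[]. A sketch with a hypothetical attribute name:

static ssize_t example_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
KERNEL_ATTR_RO(example);

/* ...then &example_attr.attr is added to the kernel_attrs[] array. */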
diff --git a/kernel/lglock.c b/kernel/lglock.c
index f2356df..86ae2ae 100644
--- a/kernel/lglock.c
+++ b/kernel/lglock.c
@@ -4,15 +4,6 @@
#include <linux/cpu.h>
#include <linux/string.h>
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define lg_lock_ptr arch_spinlock_t
-# define lg_do_lock(l) arch_spin_lock(l)
-# define lg_do_unlock(l) arch_spin_unlock(l)
-#else
-# define lg_lock_ptr struct rt_mutex
-# define lg_do_lock(l) __rt_spin_lock(l)
-# define lg_do_unlock(l) __rt_spin_unlock(l)
-#endif
/*
* Note there is no uninit, so lglocks cannot be defined in
* modules (but it's fine to use them from there)
@@ -21,60 +12,51 @@
void lg_lock_init(struct lglock *lg, char *name)
{
-#ifdef CONFIG_PREEMPT_RT_FULL
- int i;
-
- for_each_possible_cpu(i) {
- struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
-
- rt_mutex_init(lock);
- }
-#endif
LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
}
EXPORT_SYMBOL(lg_lock_init);
void lg_local_lock(struct lglock *lg)
{
- lg_lock_ptr *lock;
+ arch_spinlock_t *lock;
- migrate_disable();
+ preempt_disable();
lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
lock = this_cpu_ptr(lg->lock);
- lg_do_lock(lock);
+ arch_spin_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock);
void lg_local_unlock(struct lglock *lg)
{
- lg_lock_ptr *lock;
+ arch_spinlock_t *lock;
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock = this_cpu_ptr(lg->lock);
- lg_do_unlock(lock);
- migrate_enable();
+ arch_spin_unlock(lock);
+ preempt_enable();
}
EXPORT_SYMBOL(lg_local_unlock);
void lg_local_lock_cpu(struct lglock *lg, int cpu)
{
- lg_lock_ptr *lock;
+ arch_spinlock_t *lock;
- preempt_disable_nort();
+ preempt_disable();
lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
lock = per_cpu_ptr(lg->lock, cpu);
- lg_do_lock(lock);
+ arch_spin_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock_cpu);
void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
- lg_lock_ptr *lock;
+ arch_spinlock_t *lock;
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock = per_cpu_ptr(lg->lock, cpu);
- lg_do_unlock(lock);
- preempt_enable_nort();
+ arch_spin_unlock(lock);
+ preempt_enable();
}
EXPORT_SYMBOL(lg_local_unlock_cpu);
@@ -82,12 +64,12 @@ void lg_global_lock(struct lglock *lg)
{
int i;
- preempt_disable_nort();
+ preempt_disable();
lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
for_each_possible_cpu(i) {
- lg_lock_ptr *lock;
+ arch_spinlock_t *lock;
lock = per_cpu_ptr(lg->lock, i);
- lg_do_lock(lock);
+ arch_spin_lock(lock);
}
}
EXPORT_SYMBOL(lg_global_lock);
@@ -98,10 +80,10 @@ void lg_global_unlock(struct lglock *lg)
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
for_each_possible_cpu(i) {
- lg_lock_ptr *lock;
+ arch_spinlock_t *lock;
lock = per_cpu_ptr(lg->lock, i);
- lg_do_unlock(lock);
+ arch_spin_unlock(lock);
}
- preempt_enable_nort();
+ preempt_enable();
}
EXPORT_SYMBOL(lg_global_unlock);
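Restored to its stock form, an lglock is an array of per-CPU spinlocks: the local operations take only this CPU's lock under preempt_disable(), while the global operations walk every possible CPU in a fixed order (which rules out ABBA deadlock) and take them all, excluding every local holder at once. Condensed from the functions above (the my_ prefixes are illustrative):

void my_lg_local_lock(struct lglock *lg)    /* cheap, common path */
{
	preempt_disable();                  /* pin to this CPU */
	arch_spin_lock(this_cpu_ptr(lg->lock));
}

void my_lg_global_lock(struct lglock *lg)   /* rare, heavyweight path */
{
	int i;

	preempt_disable();
	for_each_possible_cpu(i)
		arch_spin_lock(per_cpu_ptr(lg->lock, i));
}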
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b74f7a5..e16c45b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3541,7 +3541,6 @@ static void check_flags(unsigned long flags)
}
}
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
@@ -3556,7 +3555,6 @@ static void check_flags(unsigned long flags)
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
-#endif
if (!debug_locks)
print_irqtrace_events(current);
diff --git a/kernel/panic.c b/kernel/panic.c
index 936d00f..b6c482c 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -368,11 +368,9 @@ static u64 oops_id;
static int init_oops_id(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
if (!oops_id)
get_random_bytes(&oops_id, sizeof(oops_id));
else
-#endif
oops_id++;
return 0;
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 4208655..55e9560 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -312,7 +312,9 @@ static void *pidns_get(struct task_struct *task)
struct pid_namespace *ns;
rcu_read_lock();
- ns = get_pid_ns(task_active_pid_ns(task));
+ ns = task_active_pid_ns(task);
+ if (ns)
+ get_pid_ns(ns);
rcu_read_unlock();
return ns;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 4bf82f8..c7f31aa 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -3,7 +3,6 @@
*/
#include <linux/sched.h>
-#include <linux/sched/rt.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
@@ -664,7 +663,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
/*
* Disarm any old timer after extracting its expiry time.
*/
- BUG_ON_NONRT(!irqs_disabled());
+ BUG_ON(!irqs_disabled());
ret = 0;
old_incr = timer->it.cpu.incr;
@@ -1111,7 +1110,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
/*
* Now re-arm for the new expiry time.
*/
- BUG_ON_NONRT(!irqs_disabled());
+ BUG_ON(!irqs_disabled());
arm_timer(timer);
spin_unlock(&p->sighand->siglock);
@@ -1178,11 +1177,10 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
sig = tsk->signal;
if (sig->cputimer.running) {
struct task_cputime group_sample;
- unsigned long flags;
- raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
+ raw_spin_lock(&sig->cputimer.lock);
group_sample = sig->cputimer.cputime;
- raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
+ raw_spin_unlock(&sig->cputimer.lock);
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
@@ -1196,13 +1194,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
-static void __run_posix_cpu_timers(struct task_struct *tsk)
+void run_posix_cpu_timers(struct task_struct *tsk)
{
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
- BUG_ON_NONRT(!irqs_disabled());
+ BUG_ON(!irqs_disabled());
/*
* The fast path checks that there are no expired thread or thread
@@ -1267,190 +1265,6 @@ static void __run_posix_cpu_timers(struct task_struct *tsk)
posix_cpu_timer_kick_nohz();
}
-#ifdef CONFIG_PREEMPT_RT_BASE
-#include <linux/kthread.h>
-#include <linux/cpu.h>
-DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
-DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
-
-static int posix_cpu_timers_thread(void *data)
-{
- int cpu = (long)data;
-
- BUG_ON(per_cpu(posix_timer_task,cpu) != current);
-
- while (!kthread_should_stop()) {
- struct task_struct *tsk = NULL;
- struct task_struct *next = NULL;
-
- if (cpu_is_offline(cpu))
- goto wait_to_die;
-
- /* grab task list */
- raw_local_irq_disable();
- tsk = per_cpu(posix_timer_tasklist, cpu);
- per_cpu(posix_timer_tasklist, cpu) = NULL;
- raw_local_irq_enable();
-
- /* it's possible the list is empty, just return */
- if (!tsk) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- __set_current_state(TASK_RUNNING);
- continue;
- }
-
- /* Process task list */
- while (1) {
- /* save next */
- next = tsk->posix_timer_list;
-
- /* run the task timers, clear its ptr and
- * unreference it
- */
- __run_posix_cpu_timers(tsk);
- tsk->posix_timer_list = NULL;
- put_task_struct(tsk);
-
- /* check if this is the last on the list */
- if (next == tsk)
- break;
- tsk = next;
- }
- }
- return 0;
-
-wait_to_die:
- /* Wait for kthread_stop */
- set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop()) {
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- }
- __set_current_state(TASK_RUNNING);
- return 0;
-}
-
-static inline int __fastpath_timer_check(struct task_struct *tsk)
-{
- /* tsk == current, ensure it is safe to use ->signal/sighand */
- if (unlikely(tsk->exit_state))
- return 0;
-
- if (!task_cputime_zero(&tsk->cputime_expires))
- return 1;
-
- if (!task_cputime_zero(&tsk->signal->cputime_expires))
- return 1;
-
- return 0;
-}
-
-void run_posix_cpu_timers(struct task_struct *tsk)
-{
- unsigned long cpu = smp_processor_id();
- struct task_struct *tasklist;
-
- BUG_ON(!irqs_disabled());
- if(!per_cpu(posix_timer_task, cpu))
- return;
- /* get per-cpu references */
- tasklist = per_cpu(posix_timer_tasklist, cpu);
-
- /* check to see if we're already queued */
- if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
- get_task_struct(tsk);
- if (tasklist) {
- tsk->posix_timer_list = tasklist;
- } else {
- /*
- * The list is terminated by a self-pointing
- * task_struct
- */
- tsk->posix_timer_list = tsk;
- }
- per_cpu(posix_timer_tasklist, cpu) = tsk;
-
- wake_up_process(per_cpu(posix_timer_task, cpu));
- }
-}
-
-/*
- * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
- * Here we can start up the necessary migration thread for the new CPU.
- */
-static int posix_cpu_thread_call(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- int cpu = (long)hcpu;
- struct task_struct *p;
- struct sched_param param;
-
- switch (action) {
- case CPU_UP_PREPARE:
- p = kthread_create(posix_cpu_timers_thread, hcpu,
- "posixcputmr/%d",cpu);
- if (IS_ERR(p))
- return NOTIFY_BAD;
- p->flags |= PF_NOFREEZE;
- kthread_bind(p, cpu);
- /* Must be high prio to avoid getting starved */
- param.sched_priority = MAX_RT_PRIO-1;
- sched_setscheduler(p, SCHED_FIFO, &param);
- per_cpu(posix_timer_task,cpu) = p;
- break;
- case CPU_ONLINE:
- /* Strictly unnecessary, as first user will wake it. */
- wake_up_process(per_cpu(posix_timer_task,cpu));
- break;
-#ifdef CONFIG_HOTPLUG_CPU
- case CPU_UP_CANCELED:
- /* Unbind it from offline cpu so it can run. Fall thru. */
- kthread_bind(per_cpu(posix_timer_task, cpu),
- cpumask_any(cpu_online_mask));
- kthread_stop(per_cpu(posix_timer_task,cpu));
- per_cpu(posix_timer_task,cpu) = NULL;
- break;
- case CPU_DEAD:
- kthread_stop(per_cpu(posix_timer_task,cpu));
- per_cpu(posix_timer_task,cpu) = NULL;
- break;
-#endif
- }
- return NOTIFY_OK;
-}
-
-/* Register at highest priority so that task migration (migrate_all_tasks)
- * happens before everything else.
- */
-static struct notifier_block posix_cpu_thread_notifier = {
- .notifier_call = posix_cpu_thread_call,
- .priority = 10
-};
-
-static int __init posix_cpu_thread_init(void)
-{
- void *hcpu = (void *)(long)smp_processor_id();
- /* Start one for boot CPU. */
- unsigned long cpu;
-
- /* init the per-cpu posix_timer_tasklets */
- for_each_possible_cpu(cpu)
- per_cpu(posix_timer_tasklist, cpu) = NULL;
-
- posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
- posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
- register_cpu_notifier(&posix_cpu_thread_notifier);
- return 0;
-}
-early_initcall(posix_cpu_thread_init);
-#else /* CONFIG_PREEMPT_RT_BASE */
-void run_posix_cpu_timers(struct task_struct *tsk)
-{
- __run_posix_cpu_timers(tsk);
-}
-#endif /* CONFIG_PREEMPT_RT_BASE */
-
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
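The removed RT path is worth a note for readers of this revert: it queued tasks on a per-CPU singly linked chain through ->posix_timer_list and marked the tail with a self-pointing entry rather than NULL (NULL means "not queued"). The walk, extracted from the deleted thread function above:

struct task_struct *tsk = tasklist, *next;

while (1) {
	next = tsk->posix_timer_list;  /* save: we are about to clear it */
	__run_posix_cpu_timers(tsk);
	tsk->posix_timer_list = NULL;  /* mark "not queued" again */
	put_task_struct(tsk);
	if (next == tsk)               /* self-pointer marks the tail */
		break;
	tsk = next;
}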
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index a22b931..424c2d4 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -497,7 +497,6 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
static struct pid *good_sigevent(sigevent_t * event)
{
struct task_struct *rtn = current->group_leader;
- int sig = event->sigev_signo;
if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
@@ -506,8 +505,7 @@ static struct pid *good_sigevent(sigevent_t * event)
return NULL;
if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
- (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
- sig_kernel_coredump(sig)))
+ ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
return NULL;
return task_pid(rtn);
@@ -818,20 +816,6 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
return overrun;
}
-/*
- * Protected by RCU!
- */
-static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
-{
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (kc->timer_set == common_timer_set)
- hrtimer_wait_for_timer(&timr->it.real.timer);
- else
- /* FIXME: Whacky hack for posix-cpu-timers */
- schedule_timeout(1);
-#endif
-}
-
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
@@ -909,7 +893,6 @@ retry:
if (!timr)
return -EINVAL;
- rcu_read_lock();
kc = clockid_to_kclock(timr->it_clock);
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
@@ -918,12 +901,9 @@ retry:
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
- timer_wait_for_callback(kc, timr);
rtn = NULL; // We already got the old time...
- rcu_read_unlock();
goto retry;
}
- rcu_read_unlock();
if (old_setting && !error &&
copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
@@ -961,15 +941,10 @@ retry_delete:
if (!timer)
return -EINVAL;
- rcu_read_lock();
if (timer_delete_hook(timer) == TIMER_RETRY) {
unlock_timer(timer, flags);
- timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
- timer);
- rcu_read_unlock();
goto retry_delete;
}
- rcu_read_unlock();
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
@@ -995,18 +970,8 @@ static void itimer_delete(struct k_itimer *timer)
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
- /* On RT we can race with a deletion */
- if (!timer->it_signal) {
- unlock_timer(timer, flags);
- return;
- }
-
if (timer_delete_hook(timer) == TIMER_RETRY) {
- rcu_read_lock();
unlock_timer(timer, flags);
- timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
- timer);
- rcu_read_unlock();
goto retry_delete;
}
list_del(&timer->list);
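Both deletion paths above revert to the same TIMER_RETRY idiom: if the hook reports that the timer callback is still running, drop the timer lock so the callback can complete, then reacquire it and retry (on RT this is where the deleted timer_wait_for_callback() used to sleep instead of spinning through the lock). Condensed:

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);  /* let the callback finish */
		goto retry_delete;
	}
	list_del(&timer->list);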
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index d26958b..0121dab 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -275,8 +275,6 @@ static int create_image(int platform_mode)
local_irq_disable();
- system_state = SYSTEM_SUSPEND;
-
error = syscore_suspend();
if (error) {
printk(KERN_ERR "PM: Some system devices failed to power down, "
@@ -304,7 +302,6 @@ static int create_image(int platform_mode)
syscore_resume();
Enable_irqs:
- system_state = SYSTEM_RUNNING;
local_irq_enable();
Enable_cpus:
@@ -430,7 +427,6 @@ static int resume_target_kernel(bool platform_mode)
goto Enable_cpus;
local_irq_disable();
- system_state = SYSTEM_SUSPEND;
error = syscore_suspend();
if (error)
@@ -464,7 +460,6 @@ static int resume_target_kernel(bool platform_mode)
syscore_resume();
Enable_irqs:
- system_state = SYSTEM_RUNNING;
local_irq_enable();
Enable_cpus:
@@ -547,7 +542,6 @@ int hibernation_platform_enter(void)
goto Platform_finish;
local_irq_disable();
- system_state = SYSTEM_SUSPEND;
syscore_suspend();
if (pm_wakeup_pending()) {
error = -EAGAIN;
@@ -560,7 +554,6 @@ int hibernation_platform_enter(void)
Power_up:
syscore_resume();
- system_state = SYSTEM_RUNNING;
local_irq_enable();
enable_nonboot_cpus();
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index e6703bb..62ee437 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -218,8 +218,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
- system_state = SYSTEM_SUSPEND;
-
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
@@ -230,8 +228,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
syscore_resume();
}
- system_state = SYSTEM_RUNNING;
-
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 0a63f7b..c59896c 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1029,7 +1029,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
{
char *text;
int len = 0;
- int attempts = 0;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
@@ -1041,14 +1040,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
u64 seq;
u32 idx;
enum log_flags prev;
- int num_msg;
-try_again:
- attempts++;
- if (attempts > 10) {
- len = -EBUSY;
- goto out;
- }
- num_msg = 0;
+
if (clear_seq < log_first_seq) {
/* messages are gone, move to first available one */
clear_seq = log_first_seq;
@@ -1069,14 +1061,6 @@ try_again:
prev = msg->flags;
idx = log_next(idx);
seq++;
- num_msg++;
- if (num_msg > 5) {
- num_msg = 0;
- raw_spin_unlock_irq(&logbuf_lock);
- raw_spin_lock_irq(&logbuf_lock);
- if (clear_seq < log_first_seq)
- goto try_again;
- }
}
/* move first record forward until length fits into the buffer */
@@ -1090,21 +1074,12 @@ try_again:
prev = msg->flags;
idx = log_next(idx);
seq++;
- num_msg++;
- if (num_msg > 5) {
- num_msg = 0;
- raw_spin_unlock_irq(&logbuf_lock);
- raw_spin_lock_irq(&logbuf_lock);
- if (clear_seq < log_first_seq)
- goto try_again;
- }
}
/* last message fitting into this dump */
next_seq = log_next_seq;
len = 0;
- prev = 0;
while (len >= 0 && seq < next_seq) {
struct printk_log *msg = log_from_idx(idx);
int textlen;
@@ -1139,7 +1114,6 @@ try_again:
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
-out:
raw_spin_unlock_irq(&logbuf_lock);
kfree(text);
@@ -1297,7 +1271,6 @@ static void call_console_drivers(int level, const char *text, size_t len)
if (!console_drivers)
return;
- migrate_disable();
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
@@ -1310,7 +1283,6 @@ static void call_console_drivers(int level, const char *text, size_t len)
continue;
con->write(con, text, len);
}
- migrate_enable();
}
/*
@@ -1370,18 +1342,12 @@ static inline int can_use_console(unsigned int cpu)
 * interrupts disabled. It should return with 'logbuf_lock'
* released but interrupts still disabled.
*/
-static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
+static int console_trylock_for_printk(unsigned int cpu)
__releases(&logbuf_lock)
{
int retval = 0, wake = 0;
-#ifdef CONFIG_PREEMPT_RT_FULL
- int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
- (preempt_count() <= 1);
-#else
- int lock = 1;
-#endif
- if (lock && console_trylock()) {
+ if (console_trylock()) {
retval = 1;
/*
@@ -1521,62 +1487,6 @@ static size_t cont_print_text(char *text, size_t size)
return textlen;
}
-#ifdef CONFIG_EARLY_PRINTK
-struct console *early_console;
-
-void early_vprintk(const char *fmt, va_list ap)
-{
- if (early_console) {
- char buf[512];
- int n = vscnprintf(buf, sizeof(buf), fmt, ap);
-
- early_console->write(early_console, buf, n);
- }
-}
-
-asmlinkage void early_printk(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- early_vprintk(fmt, ap);
- va_end(ap);
-}
-
-/*
- * This is independent of any log levels - a global
- * kill switch that turns off all of printk.
- *
- * Used by the NMI watchdog if early-printk is enabled.
- */
-static bool __read_mostly printk_killswitch;
-
-static int __init force_early_printk_setup(char *str)
-{
- printk_killswitch = true;
- return 0;
-}
-early_param("force_early_printk", force_early_printk_setup);
-
-void printk_kill(void)
-{
- printk_killswitch = true;
-}
-
-static int forced_early_printk(const char *fmt, va_list ap)
-{
- if (!printk_killswitch)
- return 0;
- early_vprintk(fmt, ap);
- return 1;
-}
-#else
-static inline int forced_early_printk(const char *fmt, va_list ap)
-{
- return 0;
-}
-#endif
-
asmlinkage int vprintk_emit(int facility, int level,
const char *dict, size_t dictlen,
const char *fmt, va_list args)
@@ -1590,13 +1500,6 @@ asmlinkage int vprintk_emit(int facility, int level,
int this_cpu;
int printed_len = 0;
- /*
- * Fall back to early_printk if a debugging subsystem has
- * killed printk output
- */
- if (unlikely(forced_early_printk(fmt, args)))
- return 1;
-
boot_delay_msec(level);
printk_delay();
@@ -1716,15 +1619,8 @@ asmlinkage int vprintk_emit(int facility, int level,
* The console_trylock_for_printk() function will release 'logbuf_lock'
* regardless of whether it actually gets the console semaphore or not.
*/
- if (console_trylock_for_printk(this_cpu, flags)) {
-#ifndef CONFIG_PREEMPT_RT_FULL
+ if (console_trylock_for_printk(this_cpu))
console_unlock();
-#else
- raw_local_irq_restore(flags);
- console_unlock();
- raw_local_irq_save(flags);
-#endif
- }
lockdep_on();
out_restore_irqs:
@@ -1826,6 +1722,29 @@ static size_t cont_print_text(char *text, size_t size) { return 0; }
#endif /* CONFIG_PRINTK */
+#ifdef CONFIG_EARLY_PRINTK
+struct console *early_console;
+
+void early_vprintk(const char *fmt, va_list ap)
+{
+ if (early_console) {
+ char buf[512];
+ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
+
+ early_console->write(early_console, buf, n);
+ }
+}
+
+asmlinkage void early_printk(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ early_vprintk(fmt, ap);
+ va_end(ap);
+}
+#endif
+
static int __add_preferred_console(char *name, int idx, char *options,
char *brl_options)
{
@@ -2066,16 +1985,11 @@ static void console_cont_flush(char *text, size_t size)
goto out;
len = cont_print_text(text, size);
-#ifndef CONFIG_PREEMPT_RT_FULL
raw_spin_unlock(&logbuf_lock);
stop_critical_timings();
call_console_drivers(cont.level, text, len);
start_critical_timings();
local_irq_restore(flags);
-#else
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- call_console_drivers(cont.level, text, len);
-#endif
return;
out:
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
@@ -2158,17 +2072,12 @@ skip:
console_idx = log_next(console_idx);
console_seq++;
console_prev = msg->flags;
-
-#ifndef CONFIG_PREEMPT_RT_FULL
raw_spin_unlock(&logbuf_lock);
+
stop_critical_timings(); /* don't trace print latency */
call_console_drivers(level, text, len);
start_critical_timings();
local_irq_restore(flags);
-#else
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- call_console_drivers(level, text, len);
-#endif
}
console_locked = 0;
mutex_release(&console_lock_dep_map, 1, _RET_IP_);
@@ -2880,7 +2789,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
next_idx = idx;
l = 0;
- prev = 0;
while (seq < dumper->next_seq) {
struct printk_log *msg = log_from_idx(idx);
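The reverted logic above dropped logbuf_lock every five records (the num_msg/try_again counters) so that a large syslog dump could not hold a raw spinlock for unbounded time on RT; the reset restores the single lock hold. For reference, a hedged sketch of the restored first sizing pass in syslog_print_all(), using the internal 3.12 helpers (msg_print_text() with a NULL buffer only measures):

    raw_spin_lock_irq(&logbuf_lock);
    if (clear_seq < log_first_seq) {
            /* messages are gone, move to first available one */
            clear_seq = log_first_seq;
            clear_idx = log_first_idx;
    }

    /* pass 1: total length of all records since the last clear */
    seq = clear_seq;
    idx = clear_idx;
    prev = 0;
    while (seq < log_next_seq) {
            struct printk_log *msg = log_from_idx(idx);

            len += msg_print_text(msg, prev, true, NULL, 0);
            prev = msg->flags;
            idx = log_next(idx);
            seq++;
    }
    /* pass 2 (not shown) advances the start record until the
     * remaining text fits into the caller's buffer */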
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index fddaf65..1f4bcb3 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -135,12 +135,7 @@ static bool ptrace_freeze_traced(struct task_struct *task)
spin_lock_irq(&task->sighand->siglock);
if (task_is_traced(task) && !__fatal_signal_pending(task)) {
- raw_spin_lock_irq(&task->pi_lock);
- if (task->state & __TASK_TRACED)
- task->state = __TASK_TRACED;
- else
- task->saved_state = __TASK_TRACED;
- raw_spin_unlock_irq(&task->pi_lock);
+ task->state = __TASK_TRACED;
ret = true;
}
spin_unlock_irq(&task->sighand->siglock);
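Context for the ptrace hunk: on RT, a task blocked on a sleeping spinlock keeps its real state in task->saved_state, so the RT patch had to pick which of the two fields to rewrite under pi_lock; mainline has only task->state, and the reset restores the plain form shown as the '+' line. Annotated for reference:

    /* Freeze a traced child so a racing wakeup cannot resume it:
     * rewriting ->state under siglock strips the stopped bits. */
    spin_lock_irq(&task->sighand->siglock);
    if (task_is_traced(task) && !__fatal_signal_pending(task)) {
            task->state = __TASK_TRACED;
            ret = true;
    }
    spin_unlock_irq(&task->sighand->siglock);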
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 7e1dd3e..b02a339 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -129,7 +129,6 @@ int notrace debug_lockdep_rcu_enabled(void)
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
-#ifndef CONFIG_PREEMPT_RT_FULL
/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
@@ -156,7 +155,6 @@ int rcu_read_lock_bh_held(void)
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
-#endif
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index f202b26..9ed6075 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -369,7 +369,6 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Post an RCU bottom-half callback to be invoked after any subsequent
* quiescent state.
@@ -379,7 +378,6 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
-#endif
void rcu_init(void)
{
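With the CONFIG_PREEMPT_RT_FULL guards removed here and in rcupdate.c above, the BH flavor (call_rcu_bh(), rcu_read_lock_bh_held()) is unconditionally available again. A minimal usage sketch; struct foo and the foo_* helpers are hypothetical:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            int data;
            struct rcu_head rcu;    /* embedded head for call_rcu_bh() */
    };

    static void foo_reclaim(struct rcu_head *head)
    {
            /* container_of() recovers the object from its rcu_head */
            kfree(container_of(head, struct foo, rcu));
    }

    static void foo_free(struct foo *fp)
    {
            /* free only after all softirq (BH) readers have finished */
            call_rcu_bh(&fp->rcu, foo_reclaim);
    }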
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 507fab1..32618b3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -55,11 +55,6 @@
#include <linux/random.h>
#include <linux/ftrace_event.h>
#include <linux/suspend.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/oom.h>
-#include <linux/smpboot.h>
-#include "time/tick-internal.h"
#include "rcutree.h"
#include <trace/events/rcu.h>
@@ -150,6 +145,8 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
*/
static int rcu_scheduler_fully_active __read_mostly;
+#ifdef CONFIG_RCU_BOOST
+
/*
* Control variables for per-CPU and per-rcu_node kthreads. These
* handle all flavors of RCU.
@@ -159,6 +156,8 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
@@ -200,19 +199,6 @@ void rcu_sched_qs(int cpu)
rdp->passed_quiesce = 1;
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-static void rcu_preempt_qs(int cpu);
-
-void rcu_bh_qs(int cpu)
-{
- unsigned long flags;
-
- /* Callers to this function, rcu_preempt_qs(), must disable irqs. */
- local_irq_save(flags);
- rcu_preempt_qs(cpu);
- local_irq_restore(flags);
-}
-#else
void rcu_bh_qs(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
@@ -221,7 +207,6 @@ void rcu_bh_qs(int cpu)
trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
rdp->passed_quiesce = 1;
}
-#endif
/*
* Note a context switch. This is a quiescent state for RCU-sched,
@@ -278,7 +263,6 @@ long rcu_batches_completed_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Return the number of RCU BH batches processed thus far for debug & stats.
*/
@@ -296,7 +280,6 @@ void rcu_bh_force_quiescent_state(void)
force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
-#endif
/*
* Record the number of times rcutorture tests have been initiated and
@@ -1488,7 +1471,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
/* Handle grace-period start. */
for (;;) {
- swait_event_interruptible(rsp->gp_wq,
+ wait_event_interruptible(rsp->gp_wq,
rsp->gp_flags &
RCU_GP_FLAG_INIT);
if ((rsp->gp_flags & RCU_GP_FLAG_INIT) &&
@@ -1507,7 +1490,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
}
for (;;) {
rsp->jiffies_force_qs = jiffies + j;
- ret = swait_event_interruptible_timeout(rsp->gp_wq,
+ ret = wait_event_interruptible_timeout(rsp->gp_wq,
(rsp->gp_flags & RCU_GP_FLAG_FQS) ||
(!ACCESS_ONCE(rnp->qsmask) &&
!rcu_preempt_blocked_readers_cgp(rnp)),
@@ -1545,7 +1528,7 @@ static void rsp_wakeup(struct irq_work *work)
struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
/* Wake up rcu_gp_kthread() to start the grace period. */
- swait_wake(&rsp->gp_wq);
+ wake_up(&rsp->gp_wq);
}
/*
@@ -1619,7 +1602,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
{
WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
- swait_wake(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
+ wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
}
/*
@@ -2189,8 +2172,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
}
rsp->gp_flags |= RCU_GP_FLAG_FQS;
raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
- /* Memory barrier implied by wake_up() path. */
- swait_wake(&rsp->gp_wq);
+ wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
}
/*
@@ -2227,14 +2209,16 @@ __rcu_process_callbacks(struct rcu_state *rsp)
/*
* Do RCU core processing for the current CPU.
*/
-static void rcu_process_callbacks(void)
+static void rcu_process_callbacks(struct softirq_action *unused)
{
struct rcu_state *rsp;
if (cpu_is_offline(smp_processor_id()))
return;
+ trace_rcu_utilization(TPS("Start RCU core"));
for_each_rcu_flavor(rsp)
__rcu_process_callbacks(rsp);
+ trace_rcu_utilization(TPS("End RCU core"));
}
/*
@@ -2248,105 +2232,18 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
return;
- rcu_do_batch(rsp, rdp);
-}
-
-static void rcu_wake_cond(struct task_struct *t, int status)
-{
- /*
- * If the thread is yielding, only wake it when this
- * is invoked from idle
- */
- if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
- wake_up_process(t);
-}
-
-/*
- * Wake up this CPU's rcuc kthread to do RCU core processing.
- */
-static void invoke_rcu_core(void)
-{
- unsigned long flags;
- struct task_struct *t;
-
- if (!cpu_online(smp_processor_id()))
+ if (likely(!rsp->boost)) {
+ rcu_do_batch(rsp, rdp);
return;
- local_irq_save(flags);
- __this_cpu_write(rcu_cpu_has_work, 1);
- t = __this_cpu_read(rcu_cpu_kthread_task);
- if (t != NULL && current != t)
- rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
- local_irq_restore(flags);
-}
-
-static void rcu_cpu_kthread_park(unsigned int cpu)
-{
- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-}
-
-static int rcu_cpu_kthread_should_run(unsigned int cpu)
-{
- return __this_cpu_read(rcu_cpu_has_work);
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- * RCU softirq used in flavors and configurations of RCU that do not
- * support RCU priority boosting.
- */
-static void rcu_cpu_kthread(unsigned int cpu)
-{
- unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
- char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
- int spincnt;
-
- for (spincnt = 0; spincnt < 10; spincnt++) {
- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
- local_bh_disable();
- *statusp = RCU_KTHREAD_RUNNING;
- this_cpu_inc(rcu_cpu_kthread_loops);
- local_irq_disable();
- work = *workp;
- *workp = 0;
- local_irq_enable();
- if (work)
- rcu_process_callbacks();
- local_bh_enable();
- if (*workp == 0) {
- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
- *statusp = RCU_KTHREAD_WAITING;
- return;
- }
}
- *statusp = RCU_KTHREAD_YIELDING;
- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
- schedule_timeout_interruptible(2);
- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
- *statusp = RCU_KTHREAD_WAITING;
+ invoke_rcu_callbacks_kthread();
}
-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
- .store = &rcu_cpu_kthread_task,
- .thread_should_run = rcu_cpu_kthread_should_run,
- .thread_fn = rcu_cpu_kthread,
- .thread_comm = "rcuc/%u",
- .setup = rcu_cpu_kthread_setup,
- .park = rcu_cpu_kthread_park,
-};
-
-/*
- * Spawn per-CPU RCU core processing kthreads.
- */
-static int __init rcu_spawn_core_kthreads(void)
+static void invoke_rcu_core(void)
{
- int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(rcu_cpu_has_work, cpu) = 0;
- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
- return 0;
+ if (cpu_online(smp_processor_id()))
+ raise_softirq(RCU_SOFTIRQ);
}
-early_initcall(rcu_spawn_core_kthreads);
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
@@ -2476,7 +2373,6 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
@@ -2485,7 +2381,6 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
-#endif
/*
* Because a context switch is a grace period for RCU-sched and RCU-bh,
@@ -2563,7 +2458,6 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
-#ifndef CONFIG_PREEMPT_RT_FULL
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
@@ -2590,7 +2484,6 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-#endif
static int synchronize_sched_expedited_cpu_stop(void *data)
{
@@ -2765,10 +2658,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
/* Check for CPU stalls, if enabled. */
check_cpu_stall(rsp, rdp);
- /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
- if (rcu_nohz_full_cpu(rsp))
- return 0;
-
/* Is the RCU core waiting for a quiescent state from this CPU? */
if (rcu_scheduler_fully_active &&
rdp->qs_pending && !rdp->passed_quiesce) {
@@ -3002,7 +2891,6 @@ static void _rcu_barrier(struct rcu_state *rsp)
mutex_unlock(&rsp->barrier_mutex);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
@@ -3011,7 +2899,6 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-#endif
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
@@ -3315,7 +3202,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
}
rsp->rda = rda;
- init_swait_head(&rsp->gp_wq);
+ init_waitqueue_head(&rsp->gp_wq);
init_irq_work(&rsp->wakeup_work, rsp_wakeup);
rnp = rsp->level[rcu_num_lvls - 1];
for_each_possible_cpu(i) {
@@ -3411,6 +3298,7 @@ void __init rcu_init(void)
rcu_init_one(&rcu_sched_state, &rcu_sched_data);
rcu_init_one(&rcu_bh_state, &rcu_bh_data);
__rcu_init_preempt();
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
/*
* We don't need protection against CPU-hotplug here because
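The largest rcutree.c change moves RCU core processing from the RT per-CPU rcuc kthreads back to RCU_SOFTIRQ, and converts the simple-wait (swait) primitives back to ordinary waitqueues. The restored softirq wiring, in brief: rcu_init() registers the handler once, and invoke_rcu_core() kicks it on the current CPU:

    #include <linux/interrupt.h>

    /* boot-time registration, from rcu_init() */
    open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

    /* per-CPU kick: schedule RCU core work on this CPU */
    static void invoke_rcu_core(void)
    {
            if (cpu_online(smp_processor_id()))
                    raise_softirq(RCU_SOFTIRQ);
    }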
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 1df8d9e..52be957 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -28,7 +28,6 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/irq_work.h>
-#include <linux/wait-simple.h>
/*
* Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -201,7 +200,7 @@ struct rcu_node {
/* This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_NOCB_CPU
- struct swait_head nocb_gp_wq[2];
+ wait_queue_head_t nocb_gp_wq[2];
/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
int need_future_gp[2];
@@ -334,7 +333,7 @@ struct rcu_data {
atomic_long_t nocb_q_count_lazy; /* (approximate). */
int nocb_p_count; /* # CBs being invoked by kthread */
int nocb_p_count_lazy; /* (approximate). */
- struct swait_head nocb_wq; /* For nocb kthreads to sleep on. */
+ wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
struct task_struct *nocb_kthread;
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
@@ -404,7 +403,7 @@ struct rcu_state {
unsigned long gpnum; /* Current gp number. */
unsigned long completed; /* # of last completed gp. */
struct task_struct *gp_kthread; /* Task for grace periods. */
- struct swait_head gp_wq; /* Where GP task waits. */
+ wait_queue_head_t gp_wq; /* Where GP task waits. */
int gp_flags; /* Commands for GP task. */
/* End of fields guarded by root rcu_node's lock. */
@@ -528,9 +527,10 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
+static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
-static void rcu_cpu_kthread_setup(unsigned int cpu);
#ifdef CONFIG_RCU_BOOST
+static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
@@ -564,7 +564,6 @@ static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
#endif /* #ifndef RCU_TREE_NONCORE */
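All three swait_head fields above become wait_queue_head_t, the stock primitive. A minimal sketch of its lifecycle; the gp_ready_flag condition is hypothetical:

    #include <linux/wait.h>

    static wait_queue_head_t gp_wq;
    static bool gp_ready_flag;          /* hypothetical condition */

    static int waiter(void *unused)
    {
            /* sleep until the flag is set (or a signal arrives) */
            return wait_event_interruptible(gp_wq, gp_ready_flag);
    }

    static void waker(void)
    {
            gp_ready_flag = true;
            wake_up(&gp_wq);            /* implies a memory barrier */
    }

    /* somewhere in init code, before first use: */
    init_waitqueue_head(&gp_wq);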
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c849bd4..511e6b4 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -24,6 +24,12 @@
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/oom.h>
+#include <linux/smpboot.h>
+#include "time/tick-internal.h"
+
#define RCU_KTHREAD_PRIO 1
#ifdef CONFIG_RCU_BOOST
@@ -353,7 +359,7 @@ void rcu_read_unlock_special(struct task_struct *t)
}
/* Hardware IRQ handlers cannot block. */
- if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
+ if (in_irq() || in_serving_softirq()) {
local_irq_restore(flags);
return;
}
@@ -650,6 +656,15 @@ static void rcu_preempt_check_callbacks(int cpu)
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
+#ifdef CONFIG_RCU_BOOST
+
+static void rcu_preempt_do_callbacks(void)
+{
+ rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
+}
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
/*
* Queue a preemptible-RCU callback for invocation after a grace period.
*/
@@ -1111,19 +1126,6 @@ void exit_rcu(void)
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
-/*
- * If boosting, set rcuc kthreads to realtime priority.
- */
-static void rcu_cpu_kthread_setup(unsigned int cpu)
-{
-#ifdef CONFIG_RCU_BOOST
- struct sched_param sp;
-
- sp.sched_priority = RCU_KTHREAD_PRIO;
- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-#endif /* #ifdef CONFIG_RCU_BOOST */
-}
-
#ifdef CONFIG_RCU_BOOST
#include "rtmutex_common.h"
@@ -1155,6 +1157,16 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+ /*
+ * If the thread is yielding, only wake it when this
+ * is invoked from idle
+ */
+ if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
+ wake_up_process(t);
+}
+
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
@@ -1298,6 +1310,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
}
/*
+ * Wake up the per-CPU kthread to invoke RCU callbacks.
+ */
+static void invoke_rcu_callbacks_kthread(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __this_cpu_write(rcu_cpu_has_work, 1);
+ if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
+ current != __this_cpu_read(rcu_cpu_kthread_task)) {
+ rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
+ __this_cpu_read(rcu_cpu_kthread_status));
+ }
+ local_irq_restore(flags);
+}
+
+/*
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
*/
@@ -1351,6 +1380,67 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
return 0;
}
+static void rcu_kthread_do_work(void)
+{
+ rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
+ rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+ rcu_preempt_do_callbacks();
+}
+
+static void rcu_cpu_kthread_setup(unsigned int cpu)
+{
+ struct sched_param sp;
+
+ sp.sched_priority = RCU_KTHREAD_PRIO;
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+}
+
+static void rcu_cpu_kthread_park(unsigned int cpu)
+{
+ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+}
+
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+ return __get_cpu_var(rcu_cpu_has_work);
+}
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
+ */
+static void rcu_cpu_kthread(unsigned int cpu)
+{
+ unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
+ char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+ int spincnt;
+
+ for (spincnt = 0; spincnt < 10; spincnt++) {
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
+ local_bh_disable();
+ *statusp = RCU_KTHREAD_RUNNING;
+ this_cpu_inc(rcu_cpu_kthread_loops);
+ local_irq_disable();
+ work = *workp;
+ *workp = 0;
+ local_irq_enable();
+ if (work)
+ rcu_kthread_do_work();
+ local_bh_enable();
+ if (*workp == 0) {
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
+ *statusp = RCU_KTHREAD_WAITING;
+ return;
+ }
+ }
+ *statusp = RCU_KTHREAD_YIELDING;
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
+ schedule_timeout_interruptible(2);
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
+ *statusp = RCU_KTHREAD_WAITING;
+}
+
/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
@@ -1384,14 +1474,27 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
free_cpumask_var(cm);
}
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+ .store = &rcu_cpu_kthread_task,
+ .thread_should_run = rcu_cpu_kthread_should_run,
+ .thread_fn = rcu_cpu_kthread,
+ .thread_comm = "rcuc/%u",
+ .setup = rcu_cpu_kthread_setup,
+ .park = rcu_cpu_kthread_park,
+};
+
/*
* Spawn all kthreads -- called as soon as the scheduler is running.
*/
static int __init rcu_spawn_kthreads(void)
{
struct rcu_node *rnp;
+ int cpu;
rcu_scheduler_fully_active = 1;
+ for_each_possible_cpu(cpu)
+ per_cpu(rcu_cpu_has_work, cpu) = 0;
+ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
rnp = rcu_get_root(rcu_state);
(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
if (NUM_RCU_NODES > 1) {
@@ -1419,6 +1522,11 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
+static void invoke_rcu_callbacks_kthread(void)
+{
+ WARN_ON_ONCE(1);
+}
+
static bool rcu_is_callbacks_kthread(void)
{
return false;
@@ -1445,7 +1553,7 @@ static void rcu_prepare_kthreads(int cpu)
#endif /* #else #ifdef CONFIG_RCU_BOOST */
-#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
/*
* Check to see if any future RCU-related work will need to be done
@@ -1461,9 +1569,6 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
*delta_jiffies = ULONG_MAX;
return rcu_cpu_has_callbacks(cpu, NULL);
}
-#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
-
-#if !defined(CONFIG_RCU_FAST_NO_HZ)
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
@@ -1561,8 +1666,6 @@ static bool rcu_try_advance_all_cbs(void)
return cbs_ready;
}
-#ifndef CONFIG_PREEMPT_RT_FULL
-
/*
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
* to invoke. If the CPU has callbacks, try to advance them. Tell the
@@ -1601,7 +1704,6 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
}
return 0;
}
-#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
/*
* Prepare a CPU for idle from an RCU perspective. The first major task
@@ -1959,7 +2061,7 @@ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
*/
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
- swait_wake_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
+ wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
}
/*
@@ -1977,8 +2079,8 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
static void rcu_init_one_nocb(struct rcu_node *rnp)
{
- init_swait_head(&rnp->nocb_gp_wq[0]);
- init_swait_head(&rnp->nocb_gp_wq[1]);
+ init_waitqueue_head(&rnp->nocb_gp_wq[0]);
+ init_waitqueue_head(&rnp->nocb_gp_wq[1]);
}
/* Is the specified CPU a no-CPUs CPU? */
@@ -2018,7 +2120,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
return;
len = atomic_long_read(&rdp->nocb_q_count);
if (old_rhpp == &rdp->nocb_head) {
- swait_wake(&rdp->nocb_wq); /* ... only if queue was empty ... */
+ wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
rdp->qlen_last_fqs_check = 0;
} else if (len > rdp->qlen_last_fqs_check + qhimark) {
wake_up_process(t); /* ... or if many callbacks queued. */
@@ -2108,7 +2210,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
*/
trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
for (;;) {
- swait_event_interruptible(
+ wait_event_interruptible(
rnp->nocb_gp_wq[c & 0x1],
(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
if (likely(d))
@@ -2136,7 +2238,7 @@ static int rcu_nocb_kthread(void *arg)
for (;;) {
/* If not polling, wait for next batch of callbacks. */
if (!rcu_nocb_poll)
- swait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+ wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
list = ACCESS_ONCE(rdp->nocb_head);
if (!list) {
schedule_timeout_interruptible(1);
@@ -2186,7 +2288,7 @@ static int rcu_nocb_kthread(void *arg)
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
rdp->nocb_tail = &rdp->nocb_head;
- init_swait_head(&rdp->nocb_wq);
+ init_waitqueue_head(&rdp->nocb_wq);
}
/* Create a kthread for each RCU flavor for each no-CBs CPU. */
@@ -2701,23 +2803,3 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
}
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-
-/*
- * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
- * grace-period kthread will do force_quiescent_state() processing?
- * The idea is to avoid waking up RCU core processing on such a
- * CPU unless the grace period has extended for too long.
- *
- * This code relies on the fact that all NO_HZ_FULL CPUs are also
- * CONFIG_RCU_NOCB_CPUs.
- */
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
-{
-#ifdef CONFIG_NO_HZ_FULL
- if (tick_nohz_full_cpu(smp_processor_id()) &&
- (!rcu_gp_in_progress(rsp) ||
- ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ)))
- return 1;
-#endif /* #ifdef CONFIG_NO_HZ_FULL */
- return 0;
-}
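Under CONFIG_RCU_BOOST the per-CPU callback kthreads are now registered through the smpboot API, which spawns one thread per possible CPU and parks/unparks them across hotplug automatically. A self-contained sketch of the same pattern with hypothetical demo_* names (__get_cpu_var() is the 3.12-era accessor):

    #include <linux/smpboot.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(struct task_struct *, demo_task);
    static DEFINE_PER_CPU(int, demo_has_work);

    static int demo_should_run(unsigned int cpu)
    {
            return __get_cpu_var(demo_has_work);
    }

    static void demo_fn(unsigned int cpu)
    {
            __get_cpu_var(demo_has_work) = 0;
            /* per-CPU work goes here */
    }

    static struct smp_hotplug_thread demo_threads = {
            .store                  = &demo_task,
            .thread_should_run      = demo_should_run,
            .thread_fn              = demo_fn,
            .thread_comm            = "demo/%u",
    };

    static int __init demo_init(void)
    {
            BUG_ON(smpboot_register_percpu_thread(&demo_threads));
            return 0;
    }
    early_initcall(demo_init);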
diff --git a/kernel/relay.c b/kernel/relay.c
index b915513..5001c98 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -339,10 +339,6 @@ static void wakeup_readers(unsigned long data)
{
struct rchan_buf *buf = (struct rchan_buf *)data;
wake_up_interruptible(&buf->read_wait);
- /*
- * Stupid polling for now:
- */
- mod_timer(&buf->timer, jiffies + 1);
}
/**
@@ -360,7 +356,6 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
init_waitqueue_head(&buf->read_wait);
kref_init(&buf->kref);
setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
- mod_timer(&buf->timer, jiffies + 1);
} else
del_timer_sync(&buf->timer);
@@ -744,6 +739,15 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
else
buf->early_bytes += buf->chan->subbuf_size -
buf->padding[old_subbuf];
+ smp_mb();
+ if (waitqueue_active(&buf->read_wait))
+ /*
+ * Calling wake_up_interruptible() from here
+ * will deadlock if we happen to be logging
+ * from the scheduler (trying to re-grab
+ * rq->lock), so defer it.
+ */
+ mod_timer(&buf->timer, jiffies + 1);
}
old = buf->data;
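The relay hunks restore the deferred-wakeup idiom: relay_switch_subbuf() can run from scheduler context (e.g. logging under rq->lock), where calling wake_up_interruptible() directly would try to re-take rq->lock and deadlock, so the wakeup is pushed to a one-jiffy timer instead. The two halves of the restored pattern:

    /* timer callback: softirq context, safe for the real wakeup */
    static void wakeup_readers(unsigned long data)
    {
            struct rchan_buf *buf = (struct rchan_buf *)data;

            wake_up_interruptible(&buf->read_wait);
    }

    /* producer side, possibly under rq->lock: defer, don't wake */
    smp_mb();       /* order buffer update vs. waitqueue_active() */
    if (waitqueue_active(&buf->read_wait))
            mod_timer(&buf->timer, jiffies + 1);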
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 3fbcb0d..4aa8a30 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -49,7 +49,7 @@ static int __res_counter_charge(struct res_counter *counter, unsigned long val,
r = ret = 0;
*limit_fail_at = NULL;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
for (c = counter; c != NULL; c = c->parent) {
spin_lock(&c->lock);
r = res_counter_charge_locked(c, val, force);
@@ -69,7 +69,7 @@ static int __res_counter_charge(struct res_counter *counter, unsigned long val,
spin_unlock(&u->lock);
}
}
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return ret;
}
@@ -103,7 +103,7 @@ u64 res_counter_uncharge_until(struct res_counter *counter,
struct res_counter *c;
u64 ret = 0;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
for (c = counter; c != top; c = c->parent) {
u64 r;
spin_lock(&c->lock);
@@ -112,7 +112,7 @@ u64 res_counter_uncharge_until(struct res_counter *counter,
ret = r;
spin_unlock(&c->lock);
}
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return ret;
}
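The _nort IRQ helpers (which compile to no-ops on RT, where these spinlocks sleep) are gone; plain local_irq_save()/restore() again brackets the whole hierarchical walk, keeping the per-counter spin_lock() sections safe from any context. Shape of the restored charge path, abridged from the function above:

    unsigned long flags;
    struct res_counter *c;

    local_irq_save(flags);          /* IRQs off across the parent walk */
    for (c = counter; c != NULL; c = c->parent) {
            spin_lock(&c->lock);
            r = res_counter_charge_locked(c, val, force);
            spin_unlock(&c->lock);
            if (r < 0 && !ret) {
                    ret = r;
                    *limit_fail_at = c;     /* first level that failed */
                    if (!force)
                            break;
            }
    }
    local_irq_restore(flags);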
diff --git a/kernel/rt.c b/kernel/rt.c
deleted file mode 100644
index 5d17727..0000000
--- a/kernel/rt.c
+++ /dev/null
@@ -1,452 +0,0 @@
-/*
- * kernel/rt.c
- *
- * Real-Time Preemption Support
- *
- * started by Ingo Molnar:
- *
- * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- *
- * historic credit for proving that Linux spinlocks can be implemented via
- * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
- * and others) who prototyped it on 2.4 and did lots of comparative
- * research and analysis; TimeSys, for proving that you can implement a
- * fully preemptible kernel via the use of IRQ threading and mutexes;
- * Bill Huey for persuasively arguing on lkml that the mutex model is the
- * right one; and to MontaVista, who ported pmutexes to 2.6.
- *
- * This code is a from-scratch implementation and is not based on pmutexes,
- * but the idea of converting spinlocks to mutexes is used here too.
- *
- * lock debugging, locking tree, deadlock detection:
- *
- * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
- * Released under the General Public License (GPL).
- *
- * Includes portions of the generic R/W semaphore implementation from:
- *
- * Copyright (c) 2001 David Howells (dhowells@redhat.com).
- * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
- * - Derived also from comments by Linus
- *
- * Pending ownership of locks and ownership stealing:
- *
- * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
- *
- * (also by Steven Rostedt)
- * - Converted single pi_lock to individual task locks.
- *
- * By Esben Nielsen:
- * Doing priority inheritance with help of the scheduler.
- *
- * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- *  - major rework based on Esben Nielsen's initial patch
- * - replaced thread_info references by task_struct refs
- * - removed task->pending_owner dependency
- * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
- * in the scheduler return path as discussed with Steven Rostedt
- *
- * Copyright (C) 2006, Kihon Technologies Inc.
- * Steven Rostedt <rostedt@goodmis.org>
- * - debugged and patched Thomas Gleixner's rework.
- * - added back the cmpxchg to the rework.
- * - turned atomic require back on for SMP.
- */
-
-#include <linux/spinlock.h>
-#include <linux/rtmutex.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/syscalls.h>
-#include <linux/interrupt.h>
-#include <linux/plist.h>
-#include <linux/fs.h>
-#include <linux/futex.h>
-#include <linux/hrtimer.h>
-
-#include "rtmutex_common.h"
-
-/*
- * struct mutex functions
- */
-void __mutex_do_init(struct mutex *mutex, const char *name,
- struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
- * Make sure we are not reinitializing a held lock:
- */
- debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
- lockdep_init_map(&mutex->dep_map, name, key, 0);
-#endif
- mutex->lock.save_state = 0;
-}
-EXPORT_SYMBOL(__mutex_do_init);
-
-void __lockfunc _mutex_lock(struct mutex *lock)
-{
- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- rt_mutex_lock(&lock->lock);
-}
-EXPORT_SYMBOL(_mutex_lock);
-
-int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
-{
- int ret;
-
- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- ret = rt_mutex_lock_interruptible(&lock->lock, 0);
- if (ret)
- mutex_release(&lock->dep_map, 1, _RET_IP_);
- return ret;
-}
-EXPORT_SYMBOL(_mutex_lock_interruptible);
-
-int __lockfunc _mutex_lock_killable(struct mutex *lock)
-{
- int ret;
-
- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- ret = rt_mutex_lock_killable(&lock->lock, 0);
- if (ret)
- mutex_release(&lock->dep_map, 1, _RET_IP_);
- return ret;
-}
-EXPORT_SYMBOL(_mutex_lock_killable);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
-{
- mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
- rt_mutex_lock(&lock->lock);
-}
-EXPORT_SYMBOL(_mutex_lock_nested);
-
-void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
-{
- mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
- rt_mutex_lock(&lock->lock);
-}
-EXPORT_SYMBOL(_mutex_lock_nest_lock);
-
-int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
-{
- int ret;
-
- mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
- ret = rt_mutex_lock_interruptible(&lock->lock, 0);
- if (ret)
- mutex_release(&lock->dep_map, 1, _RET_IP_);
- return ret;
-}
-EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
-
-int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
-{
- int ret;
-
- mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- ret = rt_mutex_lock_killable(&lock->lock, 0);
- if (ret)
- mutex_release(&lock->dep_map, 1, _RET_IP_);
- return ret;
-}
-EXPORT_SYMBOL(_mutex_lock_killable_nested);
-#endif
-
-int __lockfunc _mutex_trylock(struct mutex *lock)
-{
- int ret = rt_mutex_trylock(&lock->lock);
-
- if (ret)
- mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-
- return ret;
-}
-EXPORT_SYMBOL(_mutex_trylock);
-
-void __lockfunc _mutex_unlock(struct mutex *lock)
-{
- mutex_release(&lock->dep_map, 1, _RET_IP_);
- rt_mutex_unlock(&lock->lock);
-}
-EXPORT_SYMBOL(_mutex_unlock);
-
-/*
- * rwlock_t functions
- */
-int __lockfunc rt_write_trylock(rwlock_t *rwlock)
-{
- int ret = rt_mutex_trylock(&rwlock->lock);
-
- if (ret) {
- rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
- migrate_disable();
- }
-
- return ret;
-}
-EXPORT_SYMBOL(rt_write_trylock);
-
-int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
-{
- int ret;
-
- *flags = 0;
- ret = rt_write_trylock(rwlock);
- return ret;
-}
-EXPORT_SYMBOL(rt_write_trylock_irqsave);
-
-int __lockfunc rt_read_trylock(rwlock_t *rwlock)
-{
- struct rt_mutex *lock = &rwlock->lock;
- int ret = 1;
-
- /*
- * recursive read locks succeed when current owns the lock,
- * but not when read_depth == 0 which means that the lock is
- * write locked.
- */
- if (rt_mutex_owner(lock) != current) {
- ret = rt_mutex_trylock(lock);
- if (ret) {
- rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
- migrate_disable();
- }
- } else if (!rwlock->read_depth) {
- ret = 0;
- }
-
- if (ret)
- rwlock->read_depth++;
-
- return ret;
-}
-EXPORT_SYMBOL(rt_read_trylock);
-
-void __lockfunc rt_write_lock(rwlock_t *rwlock)
-{
- rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
- migrate_disable();
- __rt_spin_lock(&rwlock->lock);
-}
-EXPORT_SYMBOL(rt_write_lock);
-
-void __lockfunc rt_read_lock(rwlock_t *rwlock)
-{
- struct rt_mutex *lock = &rwlock->lock;
-
- /*
- * recursive read locks succeed when current owns the lock
- */
- if (rt_mutex_owner(lock) != current) {
- rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
- __rt_spin_lock(lock);
- migrate_disable();
- }
- rwlock->read_depth++;
-}
-
-EXPORT_SYMBOL(rt_read_lock);
-
-void __lockfunc rt_write_unlock(rwlock_t *rwlock)
-{
- /* NOTE: we always pass in '1' for nested, for simplicity */
- rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
- __rt_spin_unlock(&rwlock->lock);
- migrate_enable();
-}
-EXPORT_SYMBOL(rt_write_unlock);
-
-void __lockfunc rt_read_unlock(rwlock_t *rwlock)
-{
- /* Release the lock only when read_depth is down to 0 */
- if (--rwlock->read_depth == 0) {
- rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
- __rt_spin_unlock(&rwlock->lock);
- migrate_enable();
- }
-}
-EXPORT_SYMBOL(rt_read_unlock);
-
-unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
-{
- rt_write_lock(rwlock);
-
- return 0;
-}
-EXPORT_SYMBOL(rt_write_lock_irqsave);
-
-unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
-{
- rt_read_lock(rwlock);
-
- return 0;
-}
-EXPORT_SYMBOL(rt_read_lock_irqsave);
-
-void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
- * Make sure we are not reinitializing a held lock:
- */
- debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
- lockdep_init_map(&rwlock->dep_map, name, key, 0);
-#endif
- rwlock->lock.save_state = 1;
- rwlock->read_depth = 0;
-}
-EXPORT_SYMBOL(__rt_rwlock_init);
-
-/*
- * rw_semaphores
- */
-
-void rt_up_write(struct rw_semaphore *rwsem)
-{
- rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
- rt_mutex_unlock(&rwsem->lock);
-}
-EXPORT_SYMBOL(rt_up_write);
-
-void rt_up_read(struct rw_semaphore *rwsem)
-{
- if (--rwsem->read_depth == 0) {
- rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
- rt_mutex_unlock(&rwsem->lock);
- }
-}
-EXPORT_SYMBOL(rt_up_read);
-
-/*
- * downgrade a write lock into a read lock
- * - just wake up any readers at the front of the queue
- */
-void rt_downgrade_write(struct rw_semaphore *rwsem)
-{
- BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
- rwsem->read_depth = 1;
-}
-EXPORT_SYMBOL(rt_downgrade_write);
-
-int rt_down_write_trylock(struct rw_semaphore *rwsem)
-{
- int ret = rt_mutex_trylock(&rwsem->lock);
-
- if (ret)
- rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
- return ret;
-}
-EXPORT_SYMBOL(rt_down_write_trylock);
-
-void rt_down_write(struct rw_semaphore *rwsem)
-{
- rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
- rt_mutex_lock(&rwsem->lock);
-}
-EXPORT_SYMBOL(rt_down_write);
-
-void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
-{
- rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
- rt_mutex_lock(&rwsem->lock);
-}
-EXPORT_SYMBOL(rt_down_write_nested);
-
-void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
- struct lockdep_map *nest)
-{
- rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_);
- rt_mutex_lock(&rwsem->lock);
-}
-
-int rt_down_read_trylock(struct rw_semaphore *rwsem)
-{
- struct rt_mutex *lock = &rwsem->lock;
- int ret = 1;
-
- /*
- * recursive read locks succeed when current owns the rwsem,
- * but not when read_depth == 0 which means that the rwsem is
- * write locked.
- */
- if (rt_mutex_owner(lock) != current) {
- ret = rt_mutex_trylock(&rwsem->lock);
- if (ret)
- rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
- } else if (!rwsem->read_depth) {
- ret = 0;
- }
-
- if (ret)
- rwsem->read_depth++;
- return ret;
-}
-EXPORT_SYMBOL(rt_down_read_trylock);
-
-static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
-{
- struct rt_mutex *lock = &rwsem->lock;
-
- if (rt_mutex_owner(lock) != current) {
- rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
- rt_mutex_lock(&rwsem->lock);
- }
- rwsem->read_depth++;
-}
-
-void rt_down_read(struct rw_semaphore *rwsem)
-{
- __rt_down_read(rwsem, 0);
-}
-EXPORT_SYMBOL(rt_down_read);
-
-void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
-{
- __rt_down_read(rwsem, subclass);
-}
-EXPORT_SYMBOL(rt_down_read_nested);
-
-void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
- struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
- * Make sure we are not reinitializing a held lock:
- */
- debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
- lockdep_init_map(&rwsem->dep_map, name, key, 0);
-#endif
- rwsem->read_depth = 0;
- rwsem->lock.save_state = 0;
-}
-EXPORT_SYMBOL(__rt_rwsem_init);
-
-/**
- * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
- * @cnt: the atomic which we are to dec
- * @lock: the mutex to return holding if we dec to 0
- *
- * return true and hold lock if we dec to 0, return false otherwise
- */
-int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
-{
- /* dec if we can't possibly hit 0 */
- if (atomic_add_unless(cnt, -1, 1))
- return 0;
- /* we might hit 0, so take the lock */
- mutex_lock(lock);
- if (!atomic_dec_and_test(cnt)) {
- /* when we actually did the dec, we didn't hit 0 */
- mutex_unlock(lock);
- return 0;
- }
- /* we hit 0, and we hold the lock */
- return 1;
-}
-EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
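Deleting kernel/rt.c removes the RT reimplementation of mutexes, rwlocks and rwsems on top of rt_mutex; the generic atomic_dec_and_mutex_lock() that closed the file survives in mainline (kernel/mutex.c in 3.12). A typical use, with hypothetical obj_* names:

    #include <linux/atomic.h>
    #include <linux/mutex.h>

    static atomic_t obj_refcnt = ATOMIC_INIT(1);
    static DEFINE_MUTEX(obj_lock);

    static void obj_put(void)
    {
            /* returns 1 with obj_lock held only when the count hit
             * zero, so teardown runs exactly once, under the mutex */
            if (atomic_dec_and_mutex_lock(&obj_refcnt, &obj_lock)) {
                    /* release the resources of the final reference */
                    mutex_unlock(&obj_lock);
            }
    }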
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 4057bc6..0dd6aec 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -8,12 +8,6 @@
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
* Copyright (C) 2006 Esben Nielsen
*
- * Adaptive Spinlocks:
- * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
- * and Peter Morreale,
- * Adaptive Spinlocks simplification:
- * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
- *
* See Documentation/rt-mutex-design.txt for details.
*/
#include <linux/spinlock.h>
@@ -21,7 +15,6 @@
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/timer.h>
-#include <linux/ww_mutex.h>
#include "rtmutex_common.h"
@@ -75,12 +68,6 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
clear_rt_mutex_waiters(lock);
}
-static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
-{
- return waiter && waiter != PI_WAKEUP_INPROGRESS &&
- waiter != PI_REQUEUE_INPROGRESS;
-}
-
/*
* We can speed up the acquire/release, if the architecture
* supports cmpxchg and if there's no debugging state to be set up
@@ -104,12 +91,6 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
}
#endif
-static inline void init_lists(struct rt_mutex *lock)
-{
- if (unlikely(!lock->wait_list.node_list.prev))
- plist_head_init(&lock->wait_list);
-}
-
/*
* Calculate task priority from the waiter list priority
*
@@ -126,18 +107,6 @@ int rt_mutex_getprio(struct task_struct *task)
}
/*
- * Called by sched_setscheduler() to check whether the priority change
- * is overruled by a possible priority boosting.
- */
-int rt_mutex_check_prio(struct task_struct *task, int newprio)
-{
- if (!task_has_pi_waiters(task))
- return 0;
-
- return task_top_pi_waiter(task)->pi_list_entry.prio <= newprio;
-}
-
-/*
* Adjust the priority of a task, after its pi_waiters got modified.
*
* This can be both boosting and unboosting. task->pi_lock must be held.
@@ -168,14 +137,6 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
-static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
-{
- if (waiter->savestate)
- wake_up_lock_sleeper(waiter->task);
- else
- wake_up_process(waiter->task);
-}
-
/*
* Max number of times we'll walk the boosting chain:
*/
@@ -249,7 +210,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* reached or the state of the chain has changed while we
* dropped the locks.
*/
- if (!rt_mutex_real_waiter(waiter))
+ if (!waiter)
goto out_unlock_pi;
/*
@@ -300,15 +261,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/* Release the task */
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
if (!rt_mutex_owner(lock)) {
- struct rt_mutex_waiter *lock_top_waiter;
-
/*
* If the requeue above changed the top waiter, then we need
* to wake the new top waiter up to try to get the lock.
*/
- lock_top_waiter = rt_mutex_top_waiter(lock);
- if (top_waiter != lock_top_waiter)
- rt_mutex_wake_waiter(lock_top_waiter);
+
+ if (top_waiter != rt_mutex_top_waiter(lock))
+ wake_up_process(rt_mutex_top_waiter(lock)->task);
raw_spin_unlock(&lock->wait_lock);
goto out_put_task;
}
@@ -353,25 +312,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
return ret;
}
-
-#define STEAL_NORMAL 0
-#define STEAL_LATERAL 1
-
-/*
- * Note that RT tasks are excluded from lateral-steals to prevent the
- * introduction of an unbounded latency
- */
-static inline int lock_is_stealable(struct task_struct *task,
- struct task_struct *pendowner, int mode)
-{
- if (mode == STEAL_NORMAL || rt_task(task)) {
- if (task->prio >= pendowner->prio)
- return 0;
- } else if (task->prio > pendowner->prio)
- return 0;
- return 1;
-}
-
/*
* Try to take an rt-mutex
*
@@ -381,9 +321,8 @@ static inline int lock_is_stealable(struct task_struct *task,
* @task: the task which wants to acquire the lock
* @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
*/
-static int
-__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- struct rt_mutex_waiter *waiter, int mode)
+static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ struct rt_mutex_waiter *waiter)
{
/*
* We have to be careful here if the atomic speedups are
@@ -416,14 +355,12 @@ __try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
* 3) it is top waiter
*/
if (rt_mutex_has_waiters(lock)) {
- struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
-
- if (task != pown && !lock_is_stealable(task, pown, mode))
- return 0;
+ if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
+ if (!waiter || waiter != rt_mutex_top_waiter(lock))
+ return 0;
+ }
}
- /* We got the lock. */
-
if (waiter || rt_mutex_has_waiters(lock)) {
unsigned long flags;
struct rt_mutex_waiter *top;
@@ -448,6 +385,7 @@ __try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
+ /* We got the lock. */
debug_rt_mutex_lock(lock);
rt_mutex_set_owner(lock, task);
@@ -457,13 +395,6 @@ __try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
return 1;
}
-static inline int
-try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- struct rt_mutex_waiter *waiter)
-{
- return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
-}
-
/*
* Task blocks on lock.
*
@@ -482,23 +413,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
int chain_walk = 0, res;
raw_spin_lock_irqsave(&task->pi_lock, flags);
-
- /*
- * In the case of futex requeue PI, this will be a proxy
- * lock. The task will wake unaware that it is enqueueed on
- * this lock. Avoid blocking on two locks and corrupting
- * pi_blocked_on via the PI_WAKEUP_INPROGRESS
- * flag. futex_wait_requeue_pi() sets this when it wakes up
- * before requeue (due to a signal or timeout). Do not enqueue
- * the task if PI_WAKEUP_INPROGRESS is set.
- */
- if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- return -EAGAIN;
- }
-
- BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
-
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
@@ -523,7 +437,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
__rt_mutex_adjust_prio(owner);
- if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ if (owner->pi_blocked_on)
chain_walk = 1;
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
@@ -578,7 +492,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
- rt_mutex_wake_waiter(waiter);
+ wake_up_process(waiter->task);
}
/*
@@ -617,7 +531,7 @@ static void remove_waiter(struct rt_mutex *lock,
}
__rt_mutex_adjust_prio(owner);
- if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ if (owner->pi_blocked_on)
chain_walk = 1;
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
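With the PI_WAKEUP_INPROGRESS/PI_REQUEUE_INPROGRESS sentinels gone, task->pi_blocked_on is once again either NULL or a live waiter, so every rt_mutex_real_waiter() test in this file collapses to a plain NULL check, as in the restored tail of remove_waiter() just above:

    /* restored invariant: pi_blocked_on is NULL or a real waiter */
    raw_spin_lock_irqsave(&owner->pi_lock, flags);
    if (owner->pi_blocked_on)
            chain_walk = 1;         /* owner is blocked too: propagate */
    raw_spin_unlock_irqrestore(&owner->pi_lock, flags);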
@@ -651,371 +565,23 @@ void rt_mutex_adjust_pi(struct task_struct *task)
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
- if (!rt_mutex_real_waiter(waiter) ||
- waiter->list_entry.prio == task->prio) {
+ if (!waiter || waiter->list_entry.prio == task->prio) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-/*
- * preemptible spin_lock functions:
- */
-static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
- void (*slowfn)(struct rt_mutex *lock))
-{
- might_sleep();
-
- if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
- rt_mutex_deadlock_account_lock(lock, current);
- else
- slowfn(lock);
-}
-
-static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
- void (*slowfn)(struct rt_mutex *lock))
-{
- if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
- rt_mutex_deadlock_account_unlock(current);
- else
- slowfn(lock);
-}
-
-#ifdef CONFIG_SMP
-/*
- * Note that owner is a speculative pointer and dereferencing relies
- * on rcu_read_lock() and the check against the lock owner.
- */
-static int adaptive_wait(struct rt_mutex *lock,
- struct task_struct *owner)
-{
- int res = 0;
-
- rcu_read_lock();
- for (;;) {
- if (owner != rt_mutex_owner(lock))
- break;
- /*
- * Ensure that owner->on_cpu is dereferenced _after_
- * checking the above to be valid.
- */
- barrier();
- if (!owner->on_cpu) {
- res = 1;
- break;
- }
- cpu_relax();
- }
- rcu_read_unlock();
- return res;
-}
-#else
-static int adaptive_wait(struct rt_mutex *lock,
- struct task_struct *orig_owner)
-{
- return 1;
-}
-#endif
-
-# define pi_lock(lock) raw_spin_lock_irq(lock)
-# define pi_unlock(lock) raw_spin_unlock_irq(lock)
-
-/*
- * Slow path lock function spin_lock style: this variant is very
- * careful not to miss any non-lock wakeups.
- *
- * We store the current state under p->pi_lock in p->saved_state and
- * the try_to_wake_up() code handles this accordingly.
- */
-static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
-{
- struct task_struct *lock_owner, *self = current;
- struct rt_mutex_waiter waiter, *top_waiter;
- int ret;
-
- rt_mutex_init_waiter(&waiter, true);
-
- raw_spin_lock(&lock->wait_lock);
- init_lists(lock);
-
- if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
- raw_spin_unlock(&lock->wait_lock);
- return;
- }
-
- BUG_ON(rt_mutex_owner(lock) == self);
-
- /*
- * We save whatever state the task is in and we'll restore it
- * after acquiring the lock taking real wakeups into account
- * as well. We are serialized via pi_lock against wakeups. See
- * try_to_wake_up().
- */
- pi_lock(&self->pi_lock);
- self->saved_state = self->state;
- __set_current_state(TASK_UNINTERRUPTIBLE);
- pi_unlock(&self->pi_lock);
-
- ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
- BUG_ON(ret);
-
- for (;;) {
- /* Try to acquire the lock again. */
- if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
- break;
-
- top_waiter = rt_mutex_top_waiter(lock);
- lock_owner = rt_mutex_owner(lock);
-
- raw_spin_unlock(&lock->wait_lock);
-
- debug_rt_mutex_print_deadlock(&waiter);
-
- if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
- schedule_rt_mutex(lock);
-
- raw_spin_lock(&lock->wait_lock);
-
- pi_lock(&self->pi_lock);
- __set_current_state(TASK_UNINTERRUPTIBLE);
- pi_unlock(&self->pi_lock);
- }
-
- /*
- * Restore the task state to current->saved_state. We set it
- * to the original state above and the try_to_wake_up() code
- * has possibly updated it when a real (non-rtmutex) wakeup
- * happened while we were blocked. Clear saved_state so
- * try_to_wake_up() does not get confused.
- */
- pi_lock(&self->pi_lock);
- __set_current_state(self->saved_state);
- self->saved_state = TASK_RUNNING;
- pi_unlock(&self->pi_lock);
-
- /*
- * try_to_take_rt_mutex() sets the waiter bit
- * unconditionally. We might have to fix that up:
- */
- fixup_rt_mutex_waiters(lock);
-
- BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
- BUG_ON(!plist_node_empty(&waiter.list_entry));
-
- raw_spin_unlock(&lock->wait_lock);
-
- debug_rt_mutex_free_waiter(&waiter);
-}
-
-/*
- * Slow path to release a rt_mutex spin_lock style
- */
-static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock)
-{
- debug_rt_mutex_unlock(lock);
-
- rt_mutex_deadlock_account_unlock(current);
-
- if (!rt_mutex_has_waiters(lock)) {
- lock->owner = NULL;
- raw_spin_unlock(&lock->wait_lock);
- return;
- }
-
- wakeup_next_waiter(lock);
-
- raw_spin_unlock(&lock->wait_lock);
-
- /* Undo pi boosting when necessary */
- rt_mutex_adjust_prio(current);
-}
-
-static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
-{
- raw_spin_lock(&lock->wait_lock);
- __rt_spin_lock_slowunlock(lock);
-}
-
-static void noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock)
-{
- int ret;
-
- do {
- ret = raw_spin_trylock(&lock->wait_lock);
- } while (!ret);
-
- __rt_spin_lock_slowunlock(lock);
-}
-
-void __lockfunc rt_spin_lock(spinlock_t *lock)
-{
- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-}
-EXPORT_SYMBOL(rt_spin_lock);
-
-void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
-{
- rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
-}
-EXPORT_SYMBOL(__rt_spin_lock);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
-{
- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
- spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-}
-EXPORT_SYMBOL(rt_spin_lock_nested);
-#endif
-
-void __lockfunc rt_spin_unlock(spinlock_t *lock)
-{
- /* NOTE: we always pass in '1' for nested, for simplicity */
- spin_release(&lock->dep_map, 1, _RET_IP_);
- rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
-}
-EXPORT_SYMBOL(rt_spin_unlock);
-
-void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock)
-{
- /* NOTE: we always pass in '1' for nested, for simplicity */
- spin_release(&lock->dep_map, 1, _RET_IP_);
- rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq);
-}
-
-void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
-{
- rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
-}
-EXPORT_SYMBOL(__rt_spin_unlock);
-
-/*
- * Wait for the lock to get unlocked: instead of polling for an unlock
- * (like raw spinlocks do), we lock and unlock, to force the kernel to
- * schedule if there's contention:
- */
-void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
-{
- spin_lock(lock);
- spin_unlock(lock);
-}
-EXPORT_SYMBOL(rt_spin_unlock_wait);
-
-int __lockfunc rt_spin_trylock(spinlock_t *lock)
-{
- int ret = rt_mutex_trylock(&lock->lock);
-
- if (ret)
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock);
-
-int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
-{
- int ret;
-
- local_bh_disable();
- ret = rt_mutex_trylock(&lock->lock);
- if (ret) {
- migrate_disable();
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- } else
- local_bh_enable();
- return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock_bh);
-
-int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
-{
- int ret;
-
- *flags = 0;
- ret = rt_mutex_trylock(&lock->lock);
- if (ret) {
- migrate_disable();
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- }
- return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock_irqsave);
-
-int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
-{
- /* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
- if (atomic_add_unless(atomic, -1, 1))
- return 0;
- rt_spin_lock(lock);
- if (atomic_dec_and_test(atomic)) {
- migrate_disable();
- return 1;
- }
- rt_spin_unlock(lock);
- return 0;
-}
-EXPORT_SYMBOL(atomic_dec_and_spin_lock);
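
atomic_dec_and_spin_lock() is the classic dec-and-lock idiom: decrement lock-free while the count stays above 1, and take the lock only for the final 1 -> 0 transition. A self-contained userspace rendering with C11 atomics and pthreads; all names are illustrative:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Returns true with *lock held iff this call dropped the count to zero. */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
        int old = atomic_load(cnt);

        /* Fast path: drop the count unless that would reach zero. */
        while (old > 1) {
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return false;           /* no lock taken */
        }

        /* Slow path: serialize the final decrement. */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(cnt, 1) == 1)
                return true;                    /* last reference, lock held */
        pthread_mutex_unlock(lock);
        return false;
}

int main(void)
{
        atomic_int cnt = 2;
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

        assert(!dec_and_lock(&cnt, &lock));     /* 2 -> 1, lock-free */
        assert(dec_and_lock(&cnt, &lock));      /* 1 -> 0, lock held */
        pthread_mutex_unlock(&lock);
        return 0;
}
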
-
-void
-__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
- * Make sure we are not reinitializing a held lock:
- */
- debug_check_no_locks_freed((void *)lock, sizeof(*lock));
- lockdep_init_map(&lock->dep_map, name, key, 0);
-#endif
-}
-EXPORT_SYMBOL(__rt_spin_lock_init);
-
-#endif /* PREEMPT_RT_FULL */
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline int __sched
-__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
-{
- struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
- struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
-
- if (!hold_ctx)
- return 0;
-
- if (unlikely(ctx == hold_ctx))
- return -EALREADY;
-
- if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
- (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
-#ifdef CONFIG_DEBUG_MUTEXES
- DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
- ctx->contending_lock = ww;
-#endif
- return -EDEADLK;
- }
-
- return 0;
-}
-#else
-static inline int __sched
-__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
-{
- BUG();
- return 0;
-}
-
-#endif
-
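
The stamp test in __mutex_lock_check_stamp() relies on wraparound-safe unsigned arithmetic: ctx->stamp - hold_ctx->stamp <= LONG_MAX holds exactly when ctx's stamp was issued at or after hold_ctx's, even if the counter has wrapped in between. A small standalone illustration of that comparison (a sketch; the helper name is made up):

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

/* Wraparound-safe "was stamp a issued after stamp b?" on a free-running
 * unsigned counter: the unsigned difference lands in [1, LONG_MAX] only
 * when a is the younger of the two. */
static bool stamp_is_younger(unsigned long a, unsigned long b)
{
        return a != b && a - b <= LONG_MAX;
}

int main(void)
{
        assert(!stamp_is_younger(5UL, 6UL));      /* 5 was issued first */
        assert(stamp_is_younger(1UL, ULONG_MAX)); /* younger despite wrap */
        return 0;
}
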
/**
* __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
* @lock: the rt_mutex to take
* @state: the state the task should block in (TASK_INTERRUPTIBLE
- * or TASK_UNINTERRUPTIBLE)
+ * or TASK_UNINTERRUPTIBLE)
* @timeout: the pre-initialized and started timer, or NULL for none
* @waiter: the pre-initialized rt_mutex_waiter
*
@@ -1024,8 +590,7 @@ __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
- struct rt_mutex_waiter *waiter,
- struct ww_acquire_ctx *ww_ctx)
+ struct rt_mutex_waiter *waiter)
{
int ret = 0;
@@ -1048,12 +613,6 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
break;
}
- if (ww_ctx && ww_ctx->acquired > 0) {
- ret = __mutex_lock_check_stamp(lock, ww_ctx);
- if (ret)
- break;
- }
-
raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
@@ -1067,102 +626,23 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
return ret;
}
-static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
- struct ww_acquire_ctx *ww_ctx)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
- /*
- * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
- * but released with a normal mutex_unlock in this call.
- *
- * This should never happen, always use ww_mutex_unlock.
- */
- DEBUG_LOCKS_WARN_ON(ww->ctx);
-
- /*
- * Not quite done after calling ww_acquire_done()?
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
-
- if (ww_ctx->contending_lock) {
- /*
- * After -EDEADLK you tried to
- * acquire a different ww_mutex? Bad!
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
-
- /*
- * You called ww_mutex_lock after receiving -EDEADLK,
- * but 'forgot' to unlock everything else first?
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
- ww_ctx->contending_lock = NULL;
- }
-
- /*
- * Naughty, using a different class will lead to undefined behavior!
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
-#endif
- ww_ctx->acquired++;
-}
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-static void ww_mutex_account_lock(struct rt_mutex *lock,
- struct ww_acquire_ctx *ww_ctx)
-{
- struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
- struct rt_mutex_waiter *waiter;
-
- /*
- * This branch gets optimized out for the common case,
- * and is only important for ww_mutex_lock.
- */
- ww_mutex_lock_acquired(ww, ww_ctx);
- ww->ctx = ww_ctx;
-
- /*
- * Give any possible sleeping processes the chance to wake up,
- * so they can recheck if they have to back off.
- */
- plist_for_each_entry(waiter, &lock->wait_list, list_entry) {
-
- /* XXX debug rt mutex waiter wakeup */
-
- BUG_ON(waiter->lock != lock);
- rt_mutex_wake_waiter(waiter);
- }
-}
-
-#else
-
-static void ww_mutex_account_lock(struct rt_mutex *lock,
- struct ww_acquire_ctx *ww_ctx)
-{
- BUG();
-}
-#endif
-
/*
* Slow path lock function:
*/
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
- int detect_deadlock, struct ww_acquire_ctx *ww_ctx)
+ int detect_deadlock)
{
struct rt_mutex_waiter waiter;
int ret = 0;
- rt_mutex_init_waiter(&waiter, false);
+ debug_rt_mutex_init_waiter(&waiter);
raw_spin_lock(&lock->wait_lock);
- init_lists(lock);
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
- if (ww_ctx)
- ww_mutex_account_lock(lock, ww_ctx);
raw_spin_unlock(&lock->wait_lock);
return 0;
}
@@ -1179,14 +659,12 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
if (likely(!ret))
- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, ww_ctx);
+ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
set_current_state(TASK_RUNNING);
if (unlikely(ret))
remove_waiter(lock, &waiter);
- else if (ww_ctx)
- ww_mutex_account_lock(lock, ww_ctx);
/*
* try_to_take_rt_mutex() sets the waiter bit
@@ -1213,9 +691,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
{
int ret = 0;
- if (!raw_spin_trylock(&lock->wait_lock))
- return ret;
- init_lists(lock);
+ raw_spin_lock(&lock->wait_lock);
if (likely(rt_mutex_owner(lock) != current)) {
@@ -1266,33 +742,30 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
- int detect_deadlock, struct ww_acquire_ctx *ww_ctx,
+ int detect_deadlock,
int (*slowfn)(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
- int detect_deadlock,
- struct ww_acquire_ctx *ww_ctx))
+ int detect_deadlock))
{
if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
rt_mutex_deadlock_account_lock(lock, current);
return 0;
} else
- return slowfn(lock, state, NULL, detect_deadlock, ww_ctx);
+ return slowfn(lock, state, NULL, detect_deadlock);
}
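
Both fastpath helpers above hinge on rt_mutex_cmpxchg(lock, NULL, current): an unlocked rt_mutex stores NULL as its owner, so a single compare-and-swap takes it without ever touching wait_lock. A minimal userspace sketch of that owner-word fast path with C11 atomics (struct task is a stand-in):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct task { int id; };                /* illustrative stand-in */

struct fast_mutex {
        _Atomic(struct task *) owner;   /* NULL == unlocked */
};

/* One CAS from NULL to ourselves takes an uncontended lock; on failure
 * a real implementation falls back to a slow path that queues a waiter. */
static bool fast_trylock(struct fast_mutex *m, struct task *me)
{
        struct task *expected = NULL;

        return atomic_compare_exchange_strong(&m->owner, &expected, me);
}

int main(void)
{
        static struct fast_mutex m;     /* zero-initialized: unlocked */
        struct task me = { 1 };

        assert(fast_trylock(&m, &me));  /* uncontended: fast path wins */
        assert(!fast_trylock(&m, &me)); /* already owned: need slow path */
        return 0;
}
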
static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout, int detect_deadlock,
- struct ww_acquire_ctx *ww_ctx,
int (*slowfn)(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
- int detect_deadlock,
- struct ww_acquire_ctx *ww_ctx))
+ int detect_deadlock))
{
if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
rt_mutex_deadlock_account_lock(lock, current);
return 0;
} else
- return slowfn(lock, state, timeout, detect_deadlock, ww_ctx);
+ return slowfn(lock, state, timeout, detect_deadlock);
}
static inline int
@@ -1325,19 +798,19 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
{
might_sleep();
- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, rt_mutex_slowlock);
+ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
/**
* rt_mutex_lock_interruptible - lock a rt_mutex interruptible
*
- * @lock: the rt_mutex to be locked
+ * @lock: the rt_mutex to be locked
* @detect_deadlock: deadlock detection on/off
*
* Returns:
- * 0 on success
- * -EINTR when interrupted by a signal
+ * 0 on success
+ * -EINTR when interrupted by a signal
* -EDEADLK when the lock would deadlock (when deadlock detection is on)
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
@@ -1346,43 +819,22 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
might_sleep();
return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
- detect_deadlock, NULL, rt_mutex_slowlock);
+ detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/**
- * rt_mutex_lock_killable - lock a rt_mutex killable
- *
- * @lock: the rt_mutex to be locked
- * @detect_deadlock: deadlock detection on/off
- *
- * Returns:
- * 0 on success
- * -EINTR when interrupted by a signal
- * -EDEADLK when the lock would deadlock (when deadlock detection is on)
- */
-int __sched rt_mutex_lock_killable(struct rt_mutex *lock,
- int detect_deadlock)
-{
- might_sleep();
-
- return rt_mutex_fastlock(lock, TASK_KILLABLE,
- detect_deadlock, NULL, rt_mutex_slowlock);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-
-/**
* rt_mutex_timed_lock - lock a rt_mutex interruptible
* the timeout structure is provided
* by the caller
*
- * @lock: the rt_mutex to be locked
+ * @lock: the rt_mutex to be locked
* @timeout: timeout structure or NULL (no timeout)
* @detect_deadlock: deadlock detection on/off
*
* Returns:
- * 0 on success
- * -EINTR when interrupted by a signal
+ * 0 on success
+ * -EINTR when interrupted by a signal
* -ETIMEDOUT when the timeout expired
* -EDEADLK when the lock would deadlock (when deadlock detection is on)
*/
@@ -1393,7 +845,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
might_sleep();
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
- detect_deadlock, NULL, rt_mutex_slowlock);
+ detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
@@ -1451,11 +903,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
+ raw_spin_lock_init(&lock->wait_lock);
plist_head_init(&lock->wait_list);
debug_rt_mutex_init(lock, name);
}
-EXPORT_SYMBOL(__rt_mutex_init);
+EXPORT_SYMBOL_GPL(__rt_mutex_init);
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
@@ -1470,7 +923,7 @@ EXPORT_SYMBOL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
- rt_mutex_init(lock);
+ __rt_mutex_init(lock, NULL);
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
rt_mutex_deadlock_account_lock(lock, proxy_owner);
@@ -1519,35 +972,6 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
return 1;
}
-#ifdef CONFIG_PREEMPT_RT_FULL
- /*
- * In PREEMPT_RT there's an added race.
- * If the task that we are about to requeue times out,
- * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
- * to skip this task. But right after the task sets
- * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
- * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
- * This will replace the PI_WAKEUP_INPROGRESS with the actual
- * lock that it blocks on. We *must not* place this task
- * on this proxy lock in that case.
- *
- * To prevent this race, we first take the task's pi_lock
- * and check if it has updated its pi_blocked_on. If it has,
- * we assume that it woke up and we return -EAGAIN.
- * Otherwise, we set the task's pi_blocked_on to
- * PI_REQUEUE_INPROGRESS, so that if the task is waking up
- * it will know that we are in the process of requeuing it.
- */
- raw_spin_lock_irq(&task->pi_lock);
- if (task->pi_blocked_on) {
- raw_spin_unlock_irq(&task->pi_lock);
- raw_spin_unlock(&lock->wait_lock);
- return -EAGAIN;
- }
- task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
- raw_spin_unlock_irq(&task->pi_lock);
-#endif
-
ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
if (ret && !rt_mutex_owner(lock)) {
@@ -1617,7 +1041,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
set_current_state(TASK_INTERRUPTIBLE);
- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
+ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
set_current_state(TASK_RUNNING);
@@ -1634,88 +1058,3 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
return ret;
}
-
-static inline int
-ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
-#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
- unsigned tmp;
-
- if (ctx->deadlock_inject_countdown-- == 0) {
- tmp = ctx->deadlock_inject_interval;
- if (tmp > UINT_MAX/4)
- tmp = UINT_MAX;
- else
- tmp = tmp*2 + tmp + tmp/2;
-
- ctx->deadlock_inject_interval = tmp;
- ctx->deadlock_inject_countdown = tmp;
- ctx->contending_lock = lock;
-
- ww_mutex_unlock(lock);
-
- return -EDEADLK;
- }
-#endif
-
- return 0;
-}
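
The countdown reload above grows the injection interval by roughly 3.5x per trigger (tmp*2 + tmp + tmp/2) and clamps once that product would overflow. The arithmetic on its own, as a compilable sketch:

#include <assert.h>
#include <limits.h>

static unsigned next_inject_interval(unsigned tmp)
{
        if (tmp > UINT_MAX / 4)
                return UINT_MAX;        /* 3.5 * tmp would wrap: clamp */
        return tmp * 2 + tmp + tmp / 2; /* tmp * 3.5 in integer math */
}

int main(void)
{
        assert(next_inject_interval(8) == 28);
        assert(next_inject_interval(UINT_MAX / 4 + 1) == UINT_MAX);
        return 0;
}
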
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-int __sched
-__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
-{
- int ret;
-
- might_sleep();
-
- mutex_acquire(&lock->base.dep_map, 0, 0, _RET_IP_);
- ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
- if (ret)
- mutex_release(&lock->base.dep_map, 1, _RET_IP_);
- else if (!ret && ww_ctx->acquired > 1)
- return ww_mutex_deadlock_injection(lock, ww_ctx);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
-
-int __sched
-__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
-{
- int ret;
-
- might_sleep();
-
- mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map,
- _RET_IP_);
- ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
- if (ret)
- mutex_release(&lock->base.dep_map, 1, _RET_IP_);
- else if (!ret && ww_ctx->acquired > 1)
- return ww_mutex_deadlock_injection(lock, ww_ctx);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(__ww_mutex_lock);
-
-void __sched ww_mutex_unlock(struct ww_mutex *lock)
-{
- /*
- * The unlocking fastpath is the 0->1 transition from 'locked'
- * into 'unlocked' state:
- */
- if (lock->ctx) {
-#ifdef CONFIG_DEBUG_MUTEXES
- DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
-#endif
- if (lock->ctx->acquired > 0)
- lock->ctx->acquired--;
- lock->ctx = NULL;
- }
-
- mutex_release(&lock->base.dep_map, 1, _RET_IP_);
- rt_mutex_unlock(&lock->base.lock);
-}
-EXPORT_SYMBOL(ww_mutex_unlock);
-#endif
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index 6ec3dc1..53a66c8 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -49,7 +49,6 @@ struct rt_mutex_waiter {
struct plist_node pi_list_entry;
struct task_struct *task;
struct rt_mutex *lock;
- bool savestate;
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
@@ -104,9 +103,6 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
/*
* PI-futex support (proxy locking functions, etc.):
*/
-#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
-#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
-
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
@@ -127,12 +123,4 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
# include "rtmutex.h"
#endif
-static inline void
-rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
-{
- debug_rt_mutex_init_waiter(waiter);
- waiter->task = NULL;
- waiter->savestate = savestate;
-}
-
#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8749d20..a494ace 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -272,11 +272,7 @@ late_initcall(sched_init_debug);
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
-#ifndef CONFIG_PREEMPT_RT_FULL
const_debug unsigned int sysctl_sched_nr_migrate = 32;
-#else
-const_debug unsigned int sysctl_sched_nr_migrate = 8;
-#endif
/*
* period over which we average the RT time consumption, measured
@@ -495,7 +491,6 @@ static void init_rq_hrtick(struct rq *rq)
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
- rq->hrtick_timer.irqsafe = 1;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
@@ -540,37 +535,6 @@ void resched_task(struct task_struct *p)
smp_send_reschedule(cpu);
}
-#ifdef CONFIG_PREEMPT_LAZY
-void resched_task_lazy(struct task_struct *p)
-{
- int cpu;
-
- if (!sched_feat(PREEMPT_LAZY)) {
- resched_task(p);
- return;
- }
-
- assert_raw_spin_locked(&task_rq(p)->lock);
-
- if (test_tsk_need_resched(p))
- return;
-
- if (test_tsk_need_resched_lazy(p))
- return;
-
- set_tsk_need_resched_lazy(p);
-
- cpu = task_cpu(p);
- if (cpu == smp_processor_id())
- return;
-
- /* NEED_RESCHED_LAZY must be visible before we test polling */
- smp_mb();
- if (!tsk_is_polling(p))
- smp_send_reschedule(cpu);
-}
-#endif
-
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -735,17 +699,6 @@ void resched_task(struct task_struct *p)
assert_raw_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
-#ifdef CONFIG_PREEMPT_LAZY
-void resched_task_lazy(struct task_struct *p)
-{
- if (!sched_feat(PREEMPT_LAZY)) {
- resched_task(p);
- return;
- }
- assert_raw_spin_locked(&task_rq(p)->lock);
- set_tsk_need_resched_lazy(p);
-}
-#endif
#endif /* CONFIG_SMP */
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
@@ -1071,18 +1024,6 @@ struct migration_arg {
static int migration_cpu_stop(void *data);
-static bool check_task_state(struct task_struct *p, long match_state)
-{
- bool match = false;
-
- raw_spin_lock_irq(&p->pi_lock);
- if (p->state == match_state || p->saved_state == match_state)
- match = true;
- raw_spin_unlock_irq(&p->pi_lock);
-
- return match;
-}
-
/*
* wait_task_inactive - wait for a thread to unschedule.
*
@@ -1127,7 +1068,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
- if (match_state && !check_task_state(p, match_state))
+ if (match_state && unlikely(p->state != match_state))
return 0;
cpu_relax();
}
@@ -1142,8 +1083,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
running = task_running(rq, p);
on_rq = p->on_rq;
ncsw = 0;
- if (!match_state || p->state == match_state
- || p->saved_state == match_state)
+ if (!match_state || p->state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &flags);
@@ -1289,12 +1229,6 @@ out:
}
}
- /*
- * Clear PF_NO_SETAFFINITY, otherwise we wreck
- * migrate_disable/enable. See the optimization for
- * PF_NO_SETAFFINITY tasks there.
- */
- p->flags &= ~PF_NO_SETAFFINITY;
return dest_cpu;
}
@@ -1374,6 +1308,10 @@ static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
activate_task(rq, p, en_flags);
p->on_rq = 1;
+
+ /* if a worker is waking up, notify workqueue */
+ if (p->flags & PF_WQ_WORKER)
+ wq_worker_waking_up(p, cpu_of(rq));
}
/*
@@ -1548,27 +1486,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
- if (!(p->state & state)) {
- /*
- * The task might be running due to a spinlock sleeper
- * wakeup. Check the saved state and set it to running
- * if the wakeup condition is true.
- */
- if (!(wake_flags & WF_LOCK_SLEEPER)) {
- if (p->saved_state & state) {
- p->saved_state = TASK_RUNNING;
- success = 1;
- }
- }
+ if (!(p->state & state))
goto out;
- }
-
- /*
- * If this is a regular wakeup, then we can unconditionally
- * clear the saved state of a "lock sleeper".
- */
- if (!(wake_flags & WF_LOCK_SLEEPER))
- p->saved_state = TASK_RUNNING;
success = 1; /* we're going to change ->state */
cpu = task_cpu(p);
@@ -1611,6 +1530,42 @@ out:
}
/**
+ * try_to_wake_up_local - try to wake up a local task with rq lock held
+ * @p: the thread to be awakened
+ *
+ * Put @p on the run-queue if it's not already there. The caller must
+ * ensure that this_rq() is locked, @p is bound to this_rq() and not
+ * the current task.
+ */
+static void try_to_wake_up_local(struct task_struct *p)
+{
+ struct rq *rq = task_rq(p);
+
+ if (WARN_ON_ONCE(rq != this_rq()) ||
+ WARN_ON_ONCE(p == current))
+ return;
+
+ lockdep_assert_held(&rq->lock);
+
+ if (!raw_spin_trylock(&p->pi_lock)) {
+ raw_spin_unlock(&rq->lock);
+ raw_spin_lock(&p->pi_lock);
+ raw_spin_lock(&rq->lock);
+ }
+
+ if (!(p->state & TASK_NORMAL))
+ goto out;
+
+ if (!p->on_rq)
+ ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+
+ ttwu_do_wakeup(rq, p, 0);
+ ttwu_stat(p, smp_processor_id(), 0);
+out:
+ raw_spin_unlock(&p->pi_lock);
+}
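
The trylock dance at the top of try_to_wake_up_local() is the standard way to take an inner lock while already holding the outer one when the documented order is inner-then-outer: try it, and on failure drop the outer lock and reacquire both in the canonical order. A userspace sketch of the pattern, assuming pthreads:

#include <pthread.h>

/* Caller holds *outer; on return both locks are held. Any state that
 * *outer protected may have changed if we had to drop it, so callers
 * must revalidate after this returns. */
static void lock_inner_locked(pthread_mutex_t *outer, pthread_mutex_t *inner)
{
        if (pthread_mutex_trylock(inner) != 0) {
                pthread_mutex_unlock(outer);
                pthread_mutex_lock(inner);      /* canonical order: inner... */
                pthread_mutex_lock(outer);      /* ...then outer */
        }
}

int main(void)
{
        static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
        static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;

        pthread_mutex_lock(&outer);
        lock_inner_locked(&outer, &inner);
        pthread_mutex_unlock(&inner);
        pthread_mutex_unlock(&outer);
        return 0;
}
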
+
+/**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
@@ -1624,23 +1579,11 @@ out:
*/
int wake_up_process(struct task_struct *p)
{
- WARN_ON(__task_is_stopped_or_traced(p));
+ WARN_ON(task_is_stopped_or_traced(p));
return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);
-/**
- * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
- * @p: The process to be woken up.
- *
- * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
- * the nature of the wakeup.
- */
-int wake_up_lock_sleeper(struct task_struct *p)
-{
- return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
-}
-
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
@@ -1778,9 +1721,6 @@ void sched_fork(struct task_struct *p)
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
#endif
-#ifdef CONFIG_HAVE_PREEMPT_LAZY
- task_thread_info(p)->preempt_lazy_count = 0;
-#endif
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
#endif
@@ -1947,12 +1887,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
- /*
- * We use mmdrop_delayed() here so we don't have to do the
- * full __mmdrop() when we are the last user.
- */
if (mm)
- mmdrop_delayed(mm);
+ mmdrop(mm);
if (unlikely(prev_state == TASK_DEAD)) {
/*
* Remove function-return probe instances associated with this
@@ -2296,13 +2232,8 @@ void __kprobes add_preempt_count(int val)
DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
PREEMPT_MASK - 10);
#endif
- if (preempt_count() == val) {
- unsigned long ip = get_parent_ip(CALLER_ADDR1);
-#ifdef CONFIG_DEBUG_PREEMPT
- current->preempt_disable_ip = ip;
-#endif
- trace_preempt_off(CALLER_ADDR0, ip);
- }
+ if (preempt_count() == val)
+ trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(add_preempt_count);
@@ -2345,13 +2276,6 @@ static noinline void __schedule_bug(struct task_struct *prev)
print_modules();
if (irqs_disabled())
print_irqtrace_events(prev);
-#ifdef CONFIG_DEBUG_PREEMPT
- if (in_atomic_preempt_off()) {
- pr_err("Preemption disabled at:");
- print_ip_sym(current->preempt_disable_ip);
- pr_cont("\n");
- }
-#endif
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
@@ -2375,133 +2299,6 @@ static inline void schedule_debug(struct task_struct *prev)
schedstat_inc(this_rq(), sched_count);
}
-#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
-#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */
-#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
-#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
-
-static inline void update_migrate_disable(struct task_struct *p)
-{
- const struct cpumask *mask;
-
- if (likely(!p->migrate_disable))
- return;
-
- /* Did we already update affinity? */
- if (unlikely(migrate_disabled_updated(p)))
- return;
-
- /*
- * Since this is always current we can get away with only locking
- * rq->lock, the ->cpus_allowed value can normally only be changed
- * while holding both p->pi_lock and rq->lock, but seeing that this
- * is current, we cannot actually be waking up, so all code that
- * relies on serialization against p->pi_lock is out of scope.
- *
- * Having rq->lock serializes us against things like
- * set_cpus_allowed_ptr() that can still happen concurrently.
- */
- mask = tsk_cpus_allowed(p);
-
- if (p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, mask);
- /* mask==cpumask_of(task_cpu(p)) which has a cpumask_weight==1 */
- p->nr_cpus_allowed = 1;
-
- /* Let migrate_enable know to fix things back up */
- p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
-}
-
-void migrate_disable(void)
-{
- struct task_struct *p = current;
-
- if (in_atomic()) {
-#ifdef CONFIG_SCHED_DEBUG
- p->migrate_disable_atomic++;
-#endif
- return;
- }
-
-#ifdef CONFIG_SCHED_DEBUG
- if (unlikely(p->migrate_disable_atomic)) {
- tracing_off();
- WARN_ON_ONCE(1);
- }
-#endif
-
- if (p->migrate_disable) {
- p->migrate_disable++;
- return;
- }
-
- preempt_disable();
- preempt_lazy_disable();
- pin_current_cpu();
- p->migrate_disable = 1;
- preempt_enable();
-}
-EXPORT_SYMBOL(migrate_disable);
-
-void migrate_enable(void)
-{
- struct task_struct *p = current;
- const struct cpumask *mask;
- unsigned long flags;
- struct rq *rq;
-
- if (in_atomic()) {
-#ifdef CONFIG_SCHED_DEBUG
- p->migrate_disable_atomic--;
-#endif
- return;
- }
-
-#ifdef CONFIG_SCHED_DEBUG
- if (unlikely(p->migrate_disable_atomic)) {
- tracing_off();
- WARN_ON_ONCE(1);
- }
-#endif
- WARN_ON_ONCE(p->migrate_disable <= 0);
-
- if (migrate_disable_count(p) > 1) {
- p->migrate_disable--;
- return;
- }
-
- preempt_disable();
- if (unlikely(migrate_disabled_updated(p))) {
- /*
- * Undo whatever update_migrate_disable() did, also see there
- * about locking.
- */
- rq = this_rq();
- raw_spin_lock_irqsave(&rq->lock, flags);
-
- /*
- * Clearing migrate_disable causes tsk_cpus_allowed to
- * show the task's original cpu affinity.
- */
- p->migrate_disable = 0;
- mask = tsk_cpus_allowed(p);
- if (p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, mask);
- p->nr_cpus_allowed = cpumask_weight(mask);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- } else
- p->migrate_disable = 0;
-
- unpin_current_cpu();
- preempt_enable();
- preempt_lazy_enable();
-}
-EXPORT_SYMBOL(migrate_enable);
-#else
-static inline void update_migrate_disable(struct task_struct *p) { }
-#define migrate_disabled_updated(p) 0
-#endif
-
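
migrate_disable()/migrate_enable() above form a nestable toggle: a per-task counter where only the outermost disable and its matching enable do the real work. That counting discipline in isolation (a sketch; the pin/unpin bodies are elided):

#include <assert.h>

static _Thread_local int pin_count;

static void pin_disable(void)
{
        if (pin_count++ == 0) {
                /* outermost level: the real pinning would happen here */
        }
}

static void pin_enable(void)
{
        assert(pin_count > 0);          /* unbalanced enable is a bug */
        if (--pin_count == 0) {
                /* last matching enable: the real unpinning happens here */
        }
}

int main(void)
{
        pin_disable();
        pin_disable();          /* nested: only bumps the counter */
        pin_enable();
        pin_enable();           /* outermost: undoes the pinning */
        assert(pin_count == 0);
        return 0;
}
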
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->on_rq || rq->skip_clock_update < 0)
@@ -2601,8 +2398,6 @@ need_resched:
smp_mb__before_spinlock();
raw_spin_lock_irq(&rq->lock);
- update_migrate_disable(prev);
-
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -2610,6 +2405,19 @@ need_resched:
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
+
+ /*
+ * If a worker went to sleep, notify and ask workqueue
+ * whether it wants to wake up a task to maintain
+ * concurrency.
+ */
+ if (prev->flags & PF_WQ_WORKER) {
+ struct task_struct *to_wakeup;
+
+ to_wakeup = wq_worker_sleeping(prev, cpu);
+ if (to_wakeup)
+ try_to_wake_up_local(to_wakeup);
+ }
}
switch_count = &prev->nvcsw;
}
@@ -2622,7 +2430,6 @@ need_resched:
put_prev_task(rq, prev);
next = pick_next_task(rq);
clear_tsk_need_resched(prev);
- clear_tsk_need_resched_lazy(prev);
rq->skip_clock_update = 0;
if (likely(prev != next)) {
@@ -2653,14 +2460,6 @@ static inline void sched_submit_work(struct task_struct *tsk)
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
-
- /*
- * If a worker went to sleep, notify and ask workqueue whether
- * it wants to wake up a task to maintain concurrency.
- */
- if (tsk->flags & PF_WQ_WORKER)
- wq_worker_sleeping(tsk);
-
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
@@ -2669,19 +2468,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
blk_schedule_flush_plug(tsk);
}
-static inline void sched_update_worker(struct task_struct *tsk)
-{
- if (tsk->flags & PF_WQ_WORKER)
- wq_worker_running(tsk);
-}
-
asmlinkage void __sched schedule(void)
{
struct task_struct *tsk = current;
sched_submit_work(tsk);
__schedule();
- sched_update_worker(tsk);
}
EXPORT_SYMBOL(schedule);
@@ -2727,26 +2519,9 @@ asmlinkage void __sched notrace preempt_schedule(void)
if (likely(!preemptible()))
return;
-#ifdef CONFIG_PREEMPT_LAZY
- /*
- * Check for lazy preemption
- */
- if (current_thread_info()->preempt_lazy_count &&
- !test_thread_flag(TIF_NEED_RESCHED))
- return;
-#endif
do {
add_preempt_count_notrace(PREEMPT_ACTIVE);
- /*
- * The add/subtract must not be traced by the function
- * tracer. But we still want to account for the
- * preempt off latency tracer. Since the _notrace versions
- * of add/subtract skip the accounting for latency tracer
- * we must force it manually.
- */
- start_critical_timings();
__schedule();
- stop_critical_timings();
sub_preempt_count_notrace(PREEMPT_ACTIVE);
/*
@@ -2919,10 +2694,10 @@ void complete(struct completion *x)
{
unsigned long flags;
- raw_spin_lock_irqsave(&x->wait.lock, flags);
+ spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
- __swait_wake_locked(&x->wait, TASK_NORMAL, 1);
- raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
@@ -2939,10 +2714,10 @@ void complete_all(struct completion *x)
{
unsigned long flags;
- raw_spin_lock_irqsave(&x->wait.lock, flags);
+ spin_lock_irqsave(&x->wait.lock, flags);
x->done += UINT_MAX/2;
- __swait_wake_locked(&x->wait, TASK_NORMAL, 0);
- raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
@@ -2951,20 +2726,20 @@ do_wait_for_common(struct completion *x,
long (*action)(long), long timeout, int state)
{
if (!x->done) {
- DEFINE_SWAITER(wait);
+ DECLARE_WAITQUEUE(wait, current);
- swait_prepare_locked(&x->wait, &wait);
+ __add_wait_queue_tail_exclusive(&x->wait, &wait);
do {
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
}
__set_current_state(state);
- raw_spin_unlock_irq(&x->wait.lock);
+ spin_unlock_irq(&x->wait.lock);
timeout = action(timeout);
- raw_spin_lock_irq(&x->wait.lock);
+ spin_lock_irq(&x->wait.lock);
} while (!x->done && timeout);
- swait_finish_locked(&x->wait, &wait);
+ __remove_wait_queue(&x->wait, &wait);
if (!x->done)
return timeout;
}
@@ -2978,9 +2753,9 @@ __wait_for_common(struct completion *x,
{
might_sleep();
- raw_spin_lock_irq(&x->wait.lock);
+ spin_lock_irq(&x->wait.lock);
timeout = do_wait_for_common(x, action, timeout, state);
- raw_spin_unlock_irq(&x->wait.lock);
+ spin_unlock_irq(&x->wait.lock);
return timeout;
}
@@ -3156,12 +2931,12 @@ bool try_wait_for_completion(struct completion *x)
unsigned long flags;
int ret = 1;
- raw_spin_lock_irqsave(&x->wait.lock, flags);
+ spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
else
x->done--;
- raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
@@ -3179,10 +2954,10 @@ bool completion_done(struct completion *x)
unsigned long flags;
int ret = 1;
- raw_spin_lock_irqsave(&x->wait.lock, flags);
+ spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
- raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(completion_done);
@@ -3243,8 +3018,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
* This function changes the 'effective' priority of a task. It does
* not touch ->normal_prio like __setscheduler().
*
- * Used by the rt_mutex code to implement priority inheritance
- * logic. Call site only calls if the priority of the task changed.
+ * Used by the rt_mutex code to implement priority inheritance logic.
*/
void rt_mutex_setprio(struct task_struct *p, int prio)
{
@@ -3475,25 +3249,20 @@ static struct task_struct *find_process_by_pid(pid_t pid)
return pid ? find_task_by_vpid(pid) : current;
}
-static void __setscheduler_params(struct task_struct *p, int policy, int prio)
-{
- p->policy = policy;
- p->rt_priority = prio;
- p->normal_prio = normal_prio(p);
- set_load_weight(p);
-}
-
/* Actually do priority change: must hold rq lock. */
static void
__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
{
- __setscheduler_params(p, policy, prio);
+ p->policy = policy;
+ p->rt_priority = prio;
+ p->normal_prio = normal_prio(p);
/* we are holding p->pi_lock already */
p->prio = rt_mutex_getprio(p);
if (rt_prio(p->prio))
p->sched_class = &rt_sched_class;
else
p->sched_class = &fair_sched_class;
+ set_load_weight(p);
}
/*
@@ -3515,7 +3284,6 @@ static bool check_same_owner(struct task_struct *p)
static int __sched_setscheduler(struct task_struct *p, int policy,
const struct sched_param *param, bool user)
{
- int newprio = MAX_RT_PRIO - 1 - param->sched_priority;
int retval, oldprio, oldpolicy = -1, on_rq, running;
unsigned long flags;
const struct sched_class *prev_class;
@@ -3611,13 +3379,10 @@ recheck:
}
/*
- * If not changing anything there's no need to proceed
- * further, but store a possible modification of
- * reset_on_fork.
+ * If not changing anything there's no need to proceed further:
*/
if (unlikely(policy == p->policy && (!rt_policy(policy) ||
param->sched_priority == p->rt_priority))) {
- p->sched_reset_on_fork = reset_on_fork;
task_rq_unlock(rq, p, &flags);
return 0;
}
@@ -3643,25 +3408,6 @@ recheck:
task_rq_unlock(rq, p, &flags);
goto recheck;
}
-
- p->sched_reset_on_fork = reset_on_fork;
- oldprio = p->prio;
-
- /*
- * Special case for priority boosted tasks.
- *
- * If the new priority is lower than or equal to the current
- * (boosted) priority (user space view), we just store the new
- * normal parameters and do not touch the scheduler class and
- * the runqueue. This will be done when the task deboosts
- * itself.
- */
- if (rt_mutex_check_prio(p, newprio)) {
- __setscheduler_params(p, policy, param->sched_priority);
- task_rq_unlock(rq, p, &flags);
- return 0;
- }
-
on_rq = p->on_rq;
running = task_current(rq, p);
if (on_rq)
@@ -3669,18 +3415,17 @@ recheck:
if (running)
p->sched_class->put_prev_task(rq, p);
+ p->sched_reset_on_fork = reset_on_fork;
+
+ oldprio = p->prio;
prev_class = p->sched_class;
__setscheduler(rq, p, policy, param->sched_priority);
if (running)
p->sched_class->set_curr_task(rq);
- if (on_rq) {
- /*
- * We enqueue to tail when the priority of a task is
- * increased (user space view).
- */
- enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
- }
+ if (on_rq)
+ enqueue_task(rq, p, 0);
+
check_class_changed(rq, p, prev_class, oldprio);
task_rq_unlock(rq, p, &flags);
@@ -4056,17 +3801,9 @@ static inline int should_resched(void)
static void __cond_resched(void)
{
- do {
- add_preempt_count(PREEMPT_ACTIVE);
- __schedule();
- sub_preempt_count(PREEMPT_ACTIVE);
- /*
- * Check again in case we missed a preemption
- * opportunity between schedule and now.
- */
- barrier();
-
- } while (need_resched());
+ add_preempt_count(PREEMPT_ACTIVE);
+ __schedule();
+ sub_preempt_count(PREEMPT_ACTIVE);
}
int __sched _cond_resched(void)
@@ -4107,7 +3844,6 @@ int __cond_resched_lock(spinlock_t *lock)
}
EXPORT_SYMBOL(__cond_resched_lock);
-#ifndef CONFIG_PREEMPT_RT_FULL
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
@@ -4121,7 +3857,6 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
-#endif
/**
* yield - yield the current processor to other threads.
@@ -4471,7 +4206,6 @@ void init_idle(struct task_struct *idle, int cpu)
rcu_read_unlock();
rq->curr = rq->idle = idle;
- idle->on_rq = 1;
#if defined(CONFIG_SMP)
idle->on_cpu = 1;
#endif
@@ -4479,9 +4213,7 @@ void init_idle(struct task_struct *idle, int cpu)
/* Set the preempt count _outside_ the spinlocks! */
task_thread_info(idle)->preempt_count = 0;
-#ifdef CONFIG_HAVE_PREEMPT_LAZY
- task_thread_info(idle)->preempt_lazy_count = 0;
-#endif
+
/*
* The idle tasks have their own, simple scheduling class:
*/
@@ -4496,90 +4228,11 @@ void init_idle(struct task_struct *idle, int cpu)
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
- if (!migrate_disabled_updated(p)) {
- if (p->sched_class && p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, new_mask);
- p->nr_cpus_allowed = cpumask_weight(new_mask);
- }
- cpumask_copy(&p->cpus_allowed, new_mask);
-}
-
-static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
-static DEFINE_MUTEX(sched_down_mutex);
-static cpumask_t sched_down_cpumask;
-
-void tell_sched_cpu_down_begin(int cpu)
-{
- mutex_lock(&sched_down_mutex);
- cpumask_set_cpu(cpu, &sched_down_cpumask);
- mutex_unlock(&sched_down_mutex);
-}
-
-void tell_sched_cpu_down_done(int cpu)
-{
- mutex_lock(&sched_down_mutex);
- cpumask_clear_cpu(cpu, &sched_down_cpumask);
- mutex_unlock(&sched_down_mutex);
-}
+ if (p->sched_class && p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, new_mask);
-/**
- * migrate_me - try to move the current task off this cpu
- *
- * Used by the pin_current_cpu() code to try to get tasks
- * to move off the current CPU as it is going down.
- * It will only move the task if the task isn't pinned to
- * the CPU (with migrate_disable, affinity or NO_SETAFFINITY)
- * and is in the RUNNING state. Otherwise the
- * movement of the task will wake it up (change its state
- * to running) when the task did not expect it.
- *
- * Returns 1 if it succeeded in moving the current task
- * 0 otherwise.
- */
-int migrate_me(void)
-{
- struct task_struct *p = current;
- struct migration_arg arg;
- struct cpumask *cpumask;
- struct cpumask *mask;
- unsigned long flags;
- unsigned int dest_cpu;
- struct rq *rq;
-
- /*
- * We cannot migrate tasks bound to a CPU or tasks not
- * running. The movement of the task will wake it up.
- */
- if (p->flags & PF_NO_SETAFFINITY || p->state)
- return 0;
-
- mutex_lock(&sched_down_mutex);
- rq = task_rq_lock(p, &flags);
-
- cpumask = &__get_cpu_var(sched_cpumasks);
- mask = &p->cpus_allowed;
-
- cpumask_andnot(cpumask, mask, &sched_down_cpumask);
-
- if (!cpumask_weight(cpumask)) {
- /* It's only on this CPU? */
- task_rq_unlock(rq, p, &flags);
- mutex_unlock(&sched_down_mutex);
- return 0;
- }
-
- dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
-
- arg.task = p;
- arg.dest_cpu = dest_cpu;
-
- task_rq_unlock(rq, p, &flags);
-
- stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
- tlb_migrate_finish(p->mm);
- mutex_unlock(&sched_down_mutex);
-
- return 1;
+ cpumask_copy(&p->cpus_allowed, new_mask);
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
}
/*
@@ -4625,7 +4278,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
+ if (cpumask_test_cpu(task_cpu(p), new_mask))
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
@@ -4714,8 +4367,6 @@ static int migration_cpu_stop(void *data)
#ifdef CONFIG_HOTPLUG_CPU
-static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
-
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
@@ -4728,12 +4379,7 @@ void idle_task_exit(void)
if (mm != &init_mm)
switch_mm(mm, &init_mm, current);
-
- /*
- * Defer the cleanup to an alive cpu. On RT we can neither
- * call mmdrop() nor mmdrop_delayed() from here.
- */
- per_cpu(idle_last_mm, smp_processor_id()) = mm;
+ mmdrop(mm);
}
/*
@@ -5057,10 +4703,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_DEAD:
calc_load_migrate(rq);
- if (per_cpu(idle_last_mm, cpu)) {
- mmdrop(per_cpu(idle_last_mm, cpu));
- per_cpu(idle_last_mm, cpu) = NULL;
- }
break;
#endif
}
@@ -6933,8 +6575,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
- int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
- sched_rcu_preempt_depth();
+ int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
return (nested == preempt_offset);
}
@@ -6944,8 +6585,7 @@ void __might_sleep(const char *file, int line, int preempt_offset)
static unsigned long prev_jiffy; /* ratelimiting */
rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
- if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
- !is_idle_task(current)) ||
+ if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
system_state != SYSTEM_RUNNING || oops_in_progress)
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
@@ -6963,13 +6603,6 @@ void __might_sleep(const char *file, int line, int preempt_offset)
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);
-#ifdef CONFIG_DEBUG_PREEMPT
- if (!preempt_count_equals(preempt_offset)) {
- pr_err("Preemption disabled at:");
- print_ip_sym(current->preempt_disable_ip);
- pr_cont("\n");
- }
-#endif
dump_stack();
}
EXPORT_SYMBOL(__might_sleep);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 1681f49..9994791 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -655,45 +655,37 @@ static void __vtime_account_system(struct task_struct *tsk)
void vtime_account_system(struct task_struct *tsk)
{
- raw_spin_lock(&tsk->vtime_lock);
- write_seqcount_begin(&tsk->vtime_seq);
+ write_seqlock(&tsk->vtime_seqlock);
__vtime_account_system(tsk);
- write_seqcount_end(&tsk->vtime_seq);
- raw_spin_unlock(&tsk->vtime_lock);
+ write_sequnlock(&tsk->vtime_seqlock);
}
void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
- raw_spin_lock(&tsk->vtime_lock);
- write_seqcount_begin(&tsk->vtime_seq);
+ write_seqlock(&tsk->vtime_seqlock);
__vtime_account_system(tsk);
if (context_tracking_in_user())
tsk->vtime_snap_whence = VTIME_USER;
- write_seqcount_end(&tsk->vtime_seq);
- raw_spin_unlock(&tsk->vtime_lock);
+ write_sequnlock(&tsk->vtime_seqlock);
}
void vtime_account_user(struct task_struct *tsk)
{
cputime_t delta_cpu;
- raw_spin_lock(&tsk->vtime_lock);
- write_seqcount_begin(&tsk->vtime_seq);
+ write_seqlock(&tsk->vtime_seqlock);
delta_cpu = get_vtime_delta(tsk);
tsk->vtime_snap_whence = VTIME_SYS;
account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
- write_seqcount_end(&tsk->vtime_seq);
- raw_spin_unlock(&tsk->vtime_lock);
+ write_sequnlock(&tsk->vtime_seqlock);
}
void vtime_user_enter(struct task_struct *tsk)
{
- raw_spin_lock(&tsk->vtime_lock);
- write_seqcount_begin(&tsk->vtime_seq);
+ write_seqlock(&tsk->vtime_seqlock);
__vtime_account_system(tsk);
tsk->vtime_snap_whence = VTIME_USER;
- write_seqcount_end(&tsk->vtime_seq);
- raw_spin_unlock(&tsk->vtime_lock);
+ write_sequnlock(&tsk->vtime_seqlock);
}
void vtime_guest_enter(struct task_struct *tsk)
@@ -705,23 +697,19 @@ void vtime_guest_enter(struct task_struct *tsk)
* synchronization against the reader (task_gtime())
* that can thus safely catch up with a tickless delta.
*/
- raw_spin_lock(&tsk->vtime_lock);
- write_seqcount_begin(&tsk->vtime_seq);
+ write_seqlock(&tsk->vtime_seqlock);
__vtime_account_system(tsk);
current->flags |= PF_VCPU;
- write_seqcount_end(&tsk->vtime_seq);
- raw_spin_unlock(&tsk->vtime_lock);
+ write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);
void vtime_guest_exit(struct task_struct *tsk)
{
- raw_spin_lock(&tsk->vtime_lock);
- write_seqcount_begin(&tsk->vtime_seq);
+ write_seqlock(&tsk->vtime_seqlock);
__vtime_account_system(tsk);
current->flags &= ~PF_VCPU;
- write_seqcount_end(&tsk->vtime_seq);
- raw_spin_unlock(&tsk->vtime_lock);
+ write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
@@ -734,30 +722,24 @@ void vtime_account_idle(struct task_struct *tsk)
void arch_vtime_task_switch(struct task_struct *prev)
{
- raw_spin_lock(&prev->vtime_lock);
- write_seqcount_begin(&prev->vtime_seq);
+ write_seqlock(&prev->vtime_seqlock);
prev->vtime_snap_whence = VTIME_SLEEPING;
- write_seqcount_end(&prev->vtime_seq);
- raw_spin_unlock(&prev->vtime_lock);
+ write_sequnlock(&prev->vtime_seqlock);
- raw_spin_lock(&current->vtime_lock);
- write_seqcount_begin(&current->vtime_seq);
+ write_seqlock(&current->vtime_seqlock);
current->vtime_snap_whence = VTIME_SYS;
current->vtime_snap = sched_clock_cpu(smp_processor_id());
- write_seqcount_end(&current->vtime_seq);
- raw_spin_unlock(&current->vtime_lock);
+ write_sequnlock(&current->vtime_seqlock);
}
void vtime_init_idle(struct task_struct *t, int cpu)
{
unsigned long flags;
- raw_spin_lock_irqsave(&t->vtime_lock, flags);
- write_seqcount_begin(&t->vtime_seq);
+ write_seqlock_irqsave(&t->vtime_seqlock, flags);
t->vtime_snap_whence = VTIME_SYS;
t->vtime_snap = sched_clock_cpu(cpu);
- write_seqcount_end(&t->vtime_seq);
- raw_spin_unlock_irqrestore(&t->vtime_lock, flags);
+ write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}
cputime_t task_gtime(struct task_struct *t)
@@ -766,13 +748,13 @@ cputime_t task_gtime(struct task_struct *t)
cputime_t gtime;
do {
- seq = read_seqcount_begin(&t->vtime_seq);
+ seq = read_seqbegin(&t->vtime_seqlock);
gtime = t->gtime;
if (t->flags & PF_VCPU)
gtime += vtime_delta(t);
- } while (read_seqcount_retry(&t->vtime_seq, seq));
+ } while (read_seqretry(&t->vtime_seqlock, seq));
return gtime;
}
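
task_gtime() above is a textbook seqlock read side: snapshot the data under a sequence number and retry if a writer ran in between. A compact userspace sketch of that reader with C11 atomics (an even sequence count means no writer is active; all names are illustrative):

#include <assert.h>
#include <stdatomic.h>

static atomic_uint seq;                         /* even: no writer active */
static _Atomic unsigned long long gtime_snap;   /* the protected datum */

static unsigned read_seqbegin_(void)
{
        unsigned s;

        /* wait out an in-flight writer (odd count), then snapshot */
        while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
                ;
        return s;
}

static int read_seqretry_(unsigned s)
{
        atomic_thread_fence(memory_order_acquire);
        return atomic_load_explicit(&seq, memory_order_relaxed) != s;
}

static unsigned long long read_gtime(void)
{
        unsigned s;
        unsigned long long v;

        do {
                s = read_seqbegin_();
                v = atomic_load_explicit(&gtime_snap, memory_order_relaxed);
        } while (read_seqretry_(s));    /* a writer raced us: try again */
        return v;
}

int main(void)
{
        assert(read_gtime() == 0);
        return 0;
}
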
@@ -795,7 +777,7 @@ fetch_task_cputime(struct task_struct *t,
*udelta = 0;
*sdelta = 0;
- seq = read_seqcount_begin(&t->vtime_seq);
+ seq = read_seqbegin(&t->vtime_seqlock);
if (u_dst)
*u_dst = *u_src;
@@ -819,7 +801,7 @@ fetch_task_cputime(struct task_struct *t,
if (t->vtime_snap_whence == VTIME_SYS)
*sdelta = delta;
}
- } while (read_seqcount_retry(&t->vtime_seq, seq));
+ } while (read_seqretry(&t->vtime_seqlock, seq));
}
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 70812af..fd9ca1d 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -256,9 +256,6 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
P(rt_throttled);
PN(rt_time);
PN(rt_runtime);
-#ifdef CONFIG_SMP
- P(rt_nr_migratory);
-#endif
#undef PN
#undef P
@@ -588,10 +585,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
#endif
P(policy);
P(prio);
-#ifdef CONFIG_PREEMPT_RT_FULL
- P(migrate_disable);
-#endif
- P(nr_cpus_allowed);
#undef PN
#undef __PN
#undef P
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0af1448..790e2fc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1902,7 +1902,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
- resched_task_lazy(rq_of(cfs_rq)->curr);
+ resched_task(rq_of(cfs_rq)->curr);
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
@@ -1926,7 +1926,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return;
if (delta > ideal_runtime)
- resched_task_lazy(rq_of(cfs_rq)->curr);
+ resched_task(rq_of(cfs_rq)->curr);
}
static void
@@ -2047,7 +2047,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
* validating it and just reschedule.
*/
if (queued) {
- resched_task_lazy(rq_of(cfs_rq)->curr);
+ resched_task(rq_of(cfs_rq)->curr);
return;
}
/*
@@ -2237,7 +2237,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
- resched_task_lazy(rq_of(cfs_rq)->curr);
+ resched_task(rq_of(cfs_rq)->curr);
}
static __always_inline
@@ -2837,7 +2837,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
if (delta < 0) {
if (rq->curr == p)
- resched_task_lazy(p);
+ resched_task(p);
return;
}
@@ -3704,7 +3704,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;
preempt:
- resched_task_lazy(curr);
+ resched_task(curr);
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
@@ -5979,7 +5979,7 @@ static void task_fork_fair(struct task_struct *p)
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
- resched_task_lazy(rq->curr);
+ resched_task(rq->curr);
}
se->vruntime -= cfs_rq->min_vruntime;
@@ -6004,7 +6004,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
*/
if (rq->curr == p) {
if (p->prio > oldprio)
- resched_task_lazy(rq->curr);
+ resched_task(rq->curr);
} else
check_preempt_curr(rq, p, 0);
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 4594051..99399f8 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -50,18 +50,11 @@ SCHED_FEAT(LB_BIAS, true)
*/
SCHED_FEAT(NONTASK_POWER, true)
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Queue remote wakeups on the target CPU and process them
* using the scheduler IPI. Reduces rq->lock contention/bounces.
*/
SCHED_FEAT(TTWU_QUEUE, true)
-#else
-SCHED_FEAT(TTWU_QUEUE, false)
-# ifdef CONFIG_PREEMPT_LAZY
-SCHED_FEAT(PREEMPT_LAZY, true)
-# endif
-#endif
SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 240fc60..ff04e1a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -43,7 +43,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rt_b->rt_period_timer.irqsafe = 1;
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2843303..4f31059 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -898,7 +898,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
-#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
@@ -1046,15 +1045,6 @@ extern void init_sched_fair_class(void);
extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);
-#ifdef CONFIG_PREEMPT_LAZY
-extern void resched_task_lazy(struct task_struct *tsk);
-#else
-static inline void resched_task_lazy(struct task_struct *tsk)
-{
- resched_task(tsk);
-}
-#endif
-
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
diff --git a/kernel/signal.c b/kernel/signal.c
index 3d32f54..ded28b9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -14,7 +14,6 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <linux/sched/rt.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
@@ -350,45 +349,13 @@ static bool task_participate_group_stop(struct task_struct *task)
return false;
}
-#ifdef __HAVE_ARCH_CMPXCHG
-static inline struct sigqueue *get_task_cache(struct task_struct *t)
-{
- struct sigqueue *q = t->sigqueue_cache;
-
- if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
- return NULL;
- return q;
-}
-
-static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-{
- if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
- return 0;
- return 1;
-}
-
-#else
-
-static inline struct sigqueue *get_task_cache(struct task_struct *t)
-{
- return NULL;
-}
-
-static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-{
- return 1;
-}
-
-#endif
-
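
The removed get_task_cache()/put_task_cache() pair is a lock-free single-slot cache built from cmpxchg: take the cached object only if nobody raced you for it, and refill the slot only if it is still empty. A userspace sketch with C11 atomics (struct sigq stands in for struct sigqueue):

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

struct sigq { int sig; };               /* stand-in payload */

static _Atomic(struct sigq *) cache_slot;

/* Claim the cached object, or NULL if the slot was empty or we lost a race. */
static struct sigq *cache_get(void)
{
        struct sigq *q = atomic_load(&cache_slot);

        if (!q || !atomic_compare_exchange_strong(&cache_slot, &q, NULL))
                return NULL;
        return q;
}

/* Returns 0 if q was parked in the slot, 1 if the caller must free it. */
static int cache_put(struct sigq *q)
{
        struct sigq *expected = NULL;

        return atomic_compare_exchange_strong(&cache_slot, &expected, q) ? 0 : 1;
}

int main(void)
{
        static struct sigq q = { 9 };

        assert(cache_put(&q) == 0);     /* slot empty: cached */
        assert(cache_get() == &q);      /* got it back */
        assert(cache_get() == NULL);    /* slot drained */
        return 0;
}
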
/*
* allocate a new signal queue record
* - this may be called without locks if and only if t == current, otherwise an
* appropriate lock must be held to stop the target task from exiting
*/
static struct sigqueue *
-__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
- int override_rlimit, int fromslab)
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
struct sigqueue *q = NULL;
struct user_struct *user;
@@ -405,10 +372,7 @@ __sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
if (override_rlimit ||
atomic_read(&user->sigpending) <=
task_rlimit(t, RLIMIT_SIGPENDING)) {
- if (!fromslab)
- q = get_task_cache(t);
- if (!q)
- q = kmem_cache_alloc(sigqueue_cachep, flags);
+ q = kmem_cache_alloc(sigqueue_cachep, flags);
} else {
print_dropped_signal(sig);
}
@@ -425,13 +389,6 @@ __sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
return q;
}
-static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
- int override_rlimit)
-{
- return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
-}
-
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
@@ -441,21 +398,6 @@ static void __sigqueue_free(struct sigqueue *q)
kmem_cache_free(sigqueue_cachep, q);
}
-static void sigqueue_free_current(struct sigqueue *q)
-{
- struct user_struct *up;
-
- if (q->flags & SIGQUEUE_PREALLOC)
- return;
-
- up = q->user;
- if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
- atomic_dec(&up->sigpending);
- free_uid(up);
- } else
- __sigqueue_free(q);
-}
-
void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
@@ -469,21 +411,6 @@ void flush_sigqueue(struct sigpending *queue)
}
/*
- * Called from __exit_signal. Flush tsk->pending and
- * tsk->sigqueue_cache
- */
-void flush_task_sigqueue(struct task_struct *tsk)
-{
- struct sigqueue *q;
-
- flush_sigqueue(&tsk->pending);
-
- q = get_task_cache(tsk);
- if (q)
- kmem_cache_free(sigqueue_cachep, q);
-}
-
-/*
* Flush all pending signals for a task.
*/
void __flush_signals(struct task_struct *t)
@@ -635,7 +562,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
- sigqueue_free_current(first);
+ __sigqueue_free(first);
} else {
/*
* Ok, it wasn't in the queue. This must be
@@ -681,8 +608,6 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
int signr;
- WARN_ON_ONCE(tsk != current);
-
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
@@ -1305,8 +1230,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
-static int
-do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+int
+force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
unsigned long int flags;
int ret, blocked, ignored;
@@ -1331,39 +1256,6 @@ do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
return ret;
}
-int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
-{
-/*
- * On some archs, PREEMPT_RT has to delay sending a signal from a trap
- * since it cannot enable preemption, and the signal code's spin_locks
- * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
- * send the signal on exit of the trap.
- */
-#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
- if (in_atomic()) {
- if (WARN_ON_ONCE(t != current))
- return 0;
- if (WARN_ON_ONCE(t->forced_info.si_signo))
- return 0;
-
- if (is_si_special(info)) {
- WARN_ON_ONCE(info != SEND_SIG_PRIV);
- t->forced_info.si_signo = sig;
- t->forced_info.si_errno = 0;
- t->forced_info.si_code = SI_KERNEL;
- t->forced_info.si_pid = 0;
- t->forced_info.si_uid = 0;
- } else {
- t->forced_info = *info;
- }
-
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
- return 0;
- }
-#endif
- return do_force_sig_info(sig, info, t);
-}
-
/*
* Nuke all other threads in the group.
*/
@@ -1394,12 +1286,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
struct sighand_struct *sighand;
for (;;) {
- local_irq_save_nort(*flags);
+ local_irq_save(*flags);
rcu_read_lock();
sighand = rcu_dereference(tsk->sighand);
if (unlikely(sighand == NULL)) {
rcu_read_unlock();
- local_irq_restore_nort(*flags);
+ local_irq_restore(*flags);
break;
}
@@ -1410,7 +1302,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
}
spin_unlock(&sighand->siglock);
rcu_read_unlock();
- local_irq_restore_nort(*flags);
+ local_irq_restore(*flags);
}
return sighand;
@@ -1655,8 +1547,7 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
- /* Preallocated sigqueue objects always from the slabcache ! */
- struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
+ struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
if (q)
q->flags |= SIGQUEUE_PREALLOC;
@@ -2017,7 +1908,15 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
+ /*
+ * Don't want to allow preemption here, because
+ * sys_ptrace() needs this task to be inactive.
+ *
+ * XXX: implement read_unlock_no_resched().
+ */
+ preempt_disable();
read_unlock(&tasklist_lock);
+ preempt_enable_no_resched();
freezable_schedule();
} else {
/*
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 15ad603..d7d498d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -21,12 +21,10 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
-#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
-#include <linux/locallock.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -64,98 +62,6 @@ char *softirq_to_name[NR_SOFTIRQS] = {
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
-#ifdef CONFIG_NO_HZ_COMMON
-# ifdef CONFIG_PREEMPT_RT_FULL
-
-struct softirq_runner {
- struct task_struct *runner[NR_SOFTIRQS];
-};
-
-static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
-
-static inline void softirq_set_runner(unsigned int sirq)
-{
- struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
-
- sr->runner[sirq] = current;
-}
-
-static inline void softirq_clr_runner(unsigned int sirq)
-{
- struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
-
- sr->runner[sirq] = NULL;
-}
-
-/*
- * On preempt-rt a softirq running context might be blocked on a
- * lock. There might be no other runnable task on this CPU because the
- * lock owner runs on some other CPU. So we have to go into idle with
- * the pending bit set. Therefore we need to check this, otherwise we
- * warn about false positives which confuses users and defeats the
- * whole purpose of this test.
- *
- * This code is called with interrupts disabled.
- */
-void softirq_check_pending_idle(void)
-{
- static int rate_limit;
- struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
- u32 warnpending;
- int i;
-
- if (rate_limit >= 10)
- return;
-
- warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
- for (i = 0; i < NR_SOFTIRQS; i++) {
- struct task_struct *tsk = sr->runner[i];
-
- /*
- * The wakeup code in rtmutex.c wakes up the task
- * _before_ it sets pi_blocked_on to NULL under
- * tsk->pi_lock. So we need to check for both: state
- * and pi_blocked_on.
- */
- if (tsk) {
- raw_spin_lock(&tsk->pi_lock);
- if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
- /* Clear all bits pending in that task */
- warnpending &= ~(tsk->softirqs_raised);
- warnpending &= ~(1 << i);
- }
- raw_spin_unlock(&tsk->pi_lock);
- }
- }
-
- if (warnpending) {
- printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- warnpending);
- rate_limit++;
- }
-}
-# else
-/*
- * On !PREEMPT_RT we just printk rate limited:
- */
-void softirq_check_pending_idle(void)
-{
- static int rate_limit;
-
- if (rate_limit < 10 &&
- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
- printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- local_softirq_pending());
- rate_limit++;
- }
-}
-# endif
-
-#else /* !CONFIG_NO_HZ_COMMON */
-static inline void softirq_set_runner(unsigned int sirq) { }
-static inline void softirq_clr_runner(unsigned int sirq) { }
-#endif
-
/*
* we cannot loop indefinitely here to avoid userspace starvation,
* but we also don't want to introduce a worst case 1/HZ latency
@@ -171,57 +77,6 @@ static void wakeup_softirqd(void)
wake_up_process(tsk);
}
-static void handle_softirq(unsigned int vec_nr, int cpu, int need_rcu_bh_qs)
-{
- struct softirq_action *h = softirq_vec + vec_nr;
- unsigned int prev_count = preempt_count();
-
- kstat_incr_softirqs_this_cpu(vec_nr);
- trace_softirq_entry(vec_nr);
- h->action(h);
- trace_softirq_exit(vec_nr);
-
- if (unlikely(prev_count != preempt_count())) {
- pr_err("softirq %u %s %p preempt count leak: %08x -> %08x\n",
- vec_nr, softirq_to_name[vec_nr], h->action,
- prev_count, (unsigned int) preempt_count());
- preempt_count() = prev_count;
- }
- if (need_rcu_bh_qs)
- rcu_bh_qs(cpu);
-}
-
-#ifndef CONFIG_PREEMPT_RT_FULL
-static inline int ksoftirqd_softirq_pending(void)
-{
- return local_softirq_pending();
-}
-
-static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
-{
- unsigned int vec_nr;
-
- local_irq_enable();
- for (vec_nr = 0; pending; vec_nr++, pending >>= 1) {
- if (pending & 1)
- handle_softirq(vec_nr, cpu, need_rcu_bh_qs);
- }
- local_irq_disable();
-}
-
-static void run_ksoftirqd(unsigned int cpu)
-{
- local_irq_disable();
- if (ksoftirqd_softirq_pending()) {
- __do_softirq();
- rcu_note_context_switch(cpu);
- local_irq_enable();
- cond_resched();
- return;
- }
- local_irq_enable();
-}
-
/*
* preempt_count and SOFTIRQ_OFFSET usage:
* - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -354,51 +209,14 @@ EXPORT_SYMBOL(local_bh_enable_ip);
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
-#ifdef CONFIG_TRACE_IRQFLAGS
-/*
- * Convoluted means of passing __do_softirq() a message through the various
- * architecture execute_on_stack() bits.
- *
- * When we run softirqs from irq_exit() and thus on the hardirq stack we need
- * to keep the lockdep irq context tracking as tight as possible in order to
- * not mis-qualify lock contexts and miss possible deadlocks.
- */
-static DEFINE_PER_CPU(int, softirq_from_hardirq);
-
-static inline void lockdep_softirq_from_hardirq(void)
-{
- this_cpu_write(softirq_from_hardirq, 1);
-}
-
-static inline void lockdep_softirq_start(void)
-{
- if (this_cpu_read(softirq_from_hardirq))
- trace_hardirq_exit();
- lockdep_softirq_enter();
-}
-
-static inline void lockdep_softirq_end(void)
-{
- lockdep_softirq_exit();
- if (this_cpu_read(softirq_from_hardirq)) {
- this_cpu_write(softirq_from_hardirq, 0);
- trace_hardirq_enter();
- }
-}
-
-#else
-static inline void lockdep_softirq_from_hardirq(void) { }
-static inline void lockdep_softirq_start(void) { }
-static inline void lockdep_softirq_end(void) { }
-#endif
-
asmlinkage void __do_softirq(void)
{
+ struct softirq_action *h;
+ __u32 pending;
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
+ int cpu;
unsigned long old_flags = current->flags;
int max_restart = MAX_SOFTIRQ_RESTART;
- __u32 pending;
- int cpu;
/*
* Mask out PF_MEMALLOC as current task context is borrowed for the
@@ -411,14 +229,43 @@ asmlinkage void __do_softirq(void)
account_irq_enter_time(current);
__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
- lockdep_softirq_start();
+ lockdep_softirq_enter();
cpu = smp_processor_id();
restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
- handle_pending_softirqs(pending, cpu, 1);
+ local_irq_enable();
+
+ h = softirq_vec;
+
+ do {
+ if (pending & 1) {
+ unsigned int vec_nr = h - softirq_vec;
+ int prev_count = preempt_count();
+
+ kstat_incr_softirqs_this_cpu(vec_nr);
+
+ trace_softirq_entry(vec_nr);
+ h->action(h);
+ trace_softirq_exit(vec_nr);
+ if (unlikely(prev_count != preempt_count())) {
+ printk(KERN_ERR "huh, entered softirq %u %s %p"
+ "with preempt_count %08x,"
+ " exited with %08x?\n", vec_nr,
+ softirq_to_name[vec_nr], h->action,
+ prev_count, preempt_count());
+ preempt_count() = prev_count;
+ }
+
+ rcu_bh_qs(cpu);
+ }
+ h++;
+ pending >>= 1;
+ } while (pending);
+
+ local_irq_disable();
pending = local_softirq_pending();
if (pending) {
@@ -429,7 +276,8 @@ restart:
wakeup_softirqd();
}
- lockdep_softirq_end();
+ lockdep_softirq_exit();
+
account_irq_exit_time(current);
__local_bh_enable(SOFTIRQ_OFFSET);
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
@@ -458,261 +306,6 @@ asmlinkage void do_softirq(void)
#endif
/*
- * This function must run with irqs disabled!
- */
-void raise_softirq_irqoff(unsigned int nr)
-{
- __raise_softirq_irqoff(nr);
-
- /*
- * If we're in an interrupt or softirq, we're done
- * (this also catches softirq-disabled code). We will
- * actually run the softirq once we return from
- * the irq or softirq.
- *
- * Otherwise we wake up ksoftirqd to make sure we
- * schedule the softirq soon.
- */
- if (!in_interrupt())
- wakeup_softirqd();
-}
-
-void __raise_softirq_irqoff(unsigned int nr)
-{
- trace_softirq_raise(nr);
- or_softirq_pending(1UL << nr);
-}
-
-static inline void local_bh_disable_nort(void) { local_bh_disable(); }
-static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
-static void ksoftirqd_set_sched_params(unsigned int cpu) { }
-static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
-
-#else /* !PREEMPT_RT_FULL */
-
-/*
- * On RT we serialize softirq execution with a cpu local lock per softirq
- */
-static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
-
-void __init softirq_early_init(void)
-{
- int i;
-
- for (i = 0; i < NR_SOFTIRQS; i++)
- local_irq_lock_init(local_softirq_locks[i]);
-}
-
-static void lock_softirq(int which)
-{
- local_lock(local_softirq_locks[which]);
-}
-
-static void unlock_softirq(int which)
-{
- local_unlock(local_softirq_locks[which]);
-}
-
-static void do_single_softirq(int which, int need_rcu_bh_qs)
-{
- unsigned long old_flags = current->flags;
-
- current->flags &= ~PF_MEMALLOC;
- vtime_account_irq_enter(current);
- current->flags |= PF_IN_SOFTIRQ;
- lockdep_softirq_enter();
- local_irq_enable();
- handle_softirq(which, smp_processor_id(), need_rcu_bh_qs);
- local_irq_disable();
- lockdep_softirq_exit();
- current->flags &= ~PF_IN_SOFTIRQ;
- vtime_account_irq_enter(current);
- tsk_restore_flags(current, old_flags, PF_MEMALLOC);
-}
-
-/*
- * Called with interrupts disabled. Process softirqs which were raised
- * in current context (or on behalf of ksoftirqd).
- */
-static void do_current_softirqs(int need_rcu_bh_qs)
-{
- while (current->softirqs_raised) {
- int i = __ffs(current->softirqs_raised);
- unsigned int pending, mask = (1U << i);
-
- current->softirqs_raised &= ~mask;
- local_irq_enable();
-
- /*
- * If the lock is contended, we boost the owner to
- * process the softirq or leave the critical section
- * now.
- */
- lock_softirq(i);
- local_irq_disable();
- softirq_set_runner(i);
- /*
- * Check the local_softirq_pending() bits to see whether
- * we still need to process this or whether someone else
- * took care of it already.
- */
- pending = local_softirq_pending();
- if (pending & mask) {
- set_softirq_pending(pending & ~mask);
- do_single_softirq(i, need_rcu_bh_qs);
- }
- softirq_clr_runner(i);
- unlock_softirq(i);
- WARN_ON(current->softirq_nestcnt != 1);
- }
-}
-
-void local_bh_disable(void)
-{
- if (++current->softirq_nestcnt == 1)
- migrate_disable();
-}
-EXPORT_SYMBOL(local_bh_disable);
-
-void local_bh_enable(void)
-{
- if (WARN_ON(current->softirq_nestcnt == 0))
- return;
-
- local_irq_disable();
- if (current->softirq_nestcnt == 1 && current->softirqs_raised)
- do_current_softirqs(1);
- local_irq_enable();
-
- if (--current->softirq_nestcnt == 0)
- migrate_enable();
-}
-EXPORT_SYMBOL(local_bh_enable);
-
-void local_bh_enable_ip(unsigned long ip)
-{
- local_bh_enable();
-}
-EXPORT_SYMBOL(local_bh_enable_ip);
-
-void _local_bh_enable(void)
-{
- if (WARN_ON(current->softirq_nestcnt == 0))
- return;
- if (--current->softirq_nestcnt == 0)
- migrate_enable();
-}
-EXPORT_SYMBOL(_local_bh_enable);
-
-int in_serving_softirq(void)
-{
- return current->flags & PF_IN_SOFTIRQ;
-}
-EXPORT_SYMBOL(in_serving_softirq);
-
-/* Called with preemption disabled */
-static void run_ksoftirqd(unsigned int cpu)
-{
- local_irq_disable();
- current->softirq_nestcnt++;
- do_current_softirqs(1);
- current->softirq_nestcnt--;
- rcu_note_context_switch(cpu);
- local_irq_enable();
-}
-
-/*
- * Called from netif_rx_ni(). Preemption enabled, but migration
- * disabled. So the cpu can't go away under us.
- */
-void thread_do_softirq(void)
-{
- if (!in_serving_softirq() && current->softirqs_raised) {
- current->softirq_nestcnt++;
- do_current_softirqs(0);
- current->softirq_nestcnt--;
- }
-}
-
-static void do_raise_softirq_irqoff(unsigned int nr)
-{
- trace_softirq_raise(nr);
- or_softirq_pending(1UL << nr);
-
- /*
- * If we are not in a hard interrupt and inside a bh disabled
- * region, we simply raise the flag on current. local_bh_enable()
- * will make sure that the softirq is executed. Otherwise we
- * delegate it to ksoftirqd.
- */
- if (!in_irq() && current->softirq_nestcnt)
- current->softirqs_raised |= (1U << nr);
- else if (__this_cpu_read(ksoftirqd))
- __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
-}
-
-void __raise_softirq_irqoff(unsigned int nr)
-{
- do_raise_softirq_irqoff(nr);
- if (!in_irq() && !current->softirq_nestcnt)
- wakeup_softirqd();
-}
-
-/*
- * This function must run with irqs disabled!
- */
-void raise_softirq_irqoff(unsigned int nr)
-{
- do_raise_softirq_irqoff(nr);
-
- /*
- * If we're in a hard interrupt we let the irq return code deal
- * with the wakeup of ksoftirqd.
- */
- if (in_irq())
- return;
-
- /*
- * If we are in thread context but outside of a bh disabled
- * region, we need to wake ksoftirqd as well.
- *
- * CHECKME: Some of the places which do that could be wrapped
- * into local_bh_disable/enable pairs. Though it's unclear
- * whether this is worth the effort. To find those places just
- * raise a WARN() if the condition is met.
- */
- if (!current->softirq_nestcnt)
- wakeup_softirqd();
-}
-
-static inline int ksoftirqd_softirq_pending(void)
-{
- return current->softirqs_raised;
-}
-
-static inline void local_bh_disable_nort(void) { }
-static inline void _local_bh_enable_nort(void) { }
-
-static inline void ksoftirqd_set_sched_params(unsigned int cpu)
-{
- struct sched_param param = { .sched_priority = 1 };
-
- sched_setscheduler(current, SCHED_FIFO, &param);
- /* Take over all pending softirqs when starting */
- local_irq_disable();
- current->softirqs_raised = local_softirq_pending();
- local_irq_enable();
-}
-
-static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
-{
- struct sched_param param = { .sched_priority = 0 };
-
- sched_setscheduler(current, SCHED_NORMAL, &param);
-}
-
-#endif /* PREEMPT_RT_FULL */
-/*
* Enter an interrupt context.
*/
void irq_enter(void)
@@ -725,9 +318,9 @@ void irq_enter(void)
* Prevent raise_softirq from needlessly waking up ksoftirqd
* here, as softirq will be serviced on return from interrupt.
*/
- local_bh_disable_nort();
+ local_bh_disable();
tick_check_idle(cpu);
- _local_bh_enable_nort();
+ _local_bh_enable();
}
__irq_enter();
@@ -735,9 +328,7 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
if (!force_irqthreads) {
- lockdep_softirq_from_hardirq();
/*
* We can safely execute softirq on the current stack if
* it is the irq stack, because it should be near empty
@@ -750,15 +341,6 @@ static inline void invoke_softirq(void)
} else {
wakeup_softirqd();
}
-#else /* PREEMPT_RT_FULL */
- unsigned long flags;
-
- local_irq_save(flags);
- if (__this_cpu_read(ksoftirqd) &&
- __this_cpu_read(ksoftirqd)->softirqs_raised)
- wakeup_softirqd();
- local_irq_restore(flags);
-#endif
}
static inline void tick_irq_exit(void)
@@ -786,13 +368,33 @@ void irq_exit(void)
#endif
account_irq_exit_time(current);
+ trace_hardirq_exit();
sub_preempt_count(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
tick_irq_exit();
rcu_irq_exit();
- trace_hardirq_exit(); /* must be last! */
+}
+
+/*
+ * This function must run with irqs disabled!
+ */
+inline void raise_softirq_irqoff(unsigned int nr)
+{
+ __raise_softirq_irqoff(nr);
+
+ /*
+ * If we're in an interrupt or softirq, we're done
+ * (this also catches softirq-disabled code). We will
+ * actually run the softirq once we return from
+ * the irq or softirq.
+ *
+ * Otherwise we wake up ksoftirqd to make sure we
+ * schedule the softirq soon.
+ */
+ if (!in_interrupt())
+ wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
@@ -804,6 +406,12 @@ void raise_softirq(unsigned int nr)
local_irq_restore(flags);
}
+void __raise_softirq_irqoff(unsigned int nr)
+{
+ trace_softirq_raise(nr);
+ or_softirq_pending(1UL << nr);
+}
+
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
softirq_vec[nr].action = action;
@@ -821,45 +429,15 @@ struct tasklet_head
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
-static void inline
-__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
-{
- if (tasklet_trylock(t)) {
-again:
- /* We may have been preempted before tasklet_trylock
- * and __tasklet_action may have already run.
- * So double check the sched bit while the tasklet
- * is locked before adding it to the list.
- */
- if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
- t->next = NULL;
- *head->tail = t;
- head->tail = &(t->next);
- raise_softirq_irqoff(nr);
- tasklet_unlock(t);
- } else {
- /* This is subtle. If we hit the corner case above,
- * it is possible that we get preempted right here,
- * and another task has successfully called
- * tasklet_schedule(), then this function, and
- * failed on the trylock. Thus we must be sure
- * before releasing the tasklet lock, that the
- * SCHED_BIT is clear. Otherwise the tasklet
- * may get its SCHED_BIT set, but not added to the
- * list
- */
- if (!tasklet_tryunlock(t))
- goto again;
- }
- }
-}
-
void __tasklet_schedule(struct tasklet_struct *t)
{
unsigned long flags;
local_irq_save(flags);
- __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
+ t->next = NULL;
+ *__this_cpu_read(tasklet_vec.tail) = t;
+ __this_cpu_write(tasklet_vec.tail, &(t->next));
+ raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_restore(flags);
}
@@ -870,7 +448,10 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
unsigned long flags;
local_irq_save(flags);
- __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
+ t->next = NULL;
+ *__this_cpu_read(tasklet_hi_vec.tail) = t;
+ __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+ raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_restore(flags);
}
@@ -878,117 +459,48 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
- __tasklet_hi_schedule(t);
-}
+ BUG_ON(!irqs_disabled());
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-
-void tasklet_enable(struct tasklet_struct *t)
-{
- if (!atomic_dec_and_test(&t->count))
- return;
- if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
- tasklet_schedule(t);
+ t->next = __this_cpu_read(tasklet_hi_vec.head);
+ __this_cpu_write(tasklet_hi_vec.head, t);
+ __raise_softirq_irqoff(HI_SOFTIRQ);
}
-EXPORT_SYMBOL(tasklet_enable);
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-void tasklet_hi_enable(struct tasklet_struct *t)
+static void tasklet_action(struct softirq_action *a)
{
- if (!atomic_dec_and_test(&t->count))
- return;
- if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
- tasklet_hi_schedule(t);
-}
-
-EXPORT_SYMBOL(tasklet_hi_enable);
+ struct tasklet_struct *list;
-static void
-__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
-{
- int loops = 1000000;
+ local_irq_disable();
+ list = __this_cpu_read(tasklet_vec.head);
+ __this_cpu_write(tasklet_vec.head, NULL);
+ __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
+ local_irq_enable();
while (list) {
struct tasklet_struct *t = list;
list = list->next;
- /*
- * Should always succeed - after a tasklet got on the
- * list (after getting the SCHED bit set from 0 to 1),
- * nothing but the tasklet softirq it got queued to can
- * lock it:
- */
- if (!tasklet_trylock(t)) {
- WARN_ON(1);
- continue;
- }
-
- t->next = NULL;
-
- /*
- * If we cannot handle the tasklet because it's disabled,
- * mark it as pending. tasklet_enable() will later
- * re-schedule the tasklet.
- */
- if (unlikely(atomic_read(&t->count))) {
-out_disabled:
- /* implicit unlock: */
- wmb();
- t->state = TASKLET_STATEF_PENDING;
- continue;
- }
-
- /*
- * After this point on the tasklet might be rescheduled
- * on another CPU, but it can only be added to another
- * CPU's tasklet list if we unlock the tasklet (which we
- * dont do yet).
- */
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- WARN_ON(1);
-
-again:
- t->func(t->data);
-
- /*
- * Try to unlock the tasklet. We must use cmpxchg, because
- * another CPU might have scheduled or disabled the tasklet.
- * We only allow the STATE_RUN -> 0 transition here.
- */
- while (!tasklet_tryunlock(t)) {
- /*
- * If it got disabled meanwhile, bail out:
- */
- if (atomic_read(&t->count))
- goto out_disabled;
- /*
- * If it got scheduled meanwhile, re-execute
- * the tasklet function:
- */
- if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- goto again;
- if (!--loops) {
- printk("hm, tasklet state: %08lx\n", t->state);
- WARN_ON(1);
+ if (tasklet_trylock(t)) {
+ if (!atomic_read(&t->count)) {
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ t->func(t->data);
tasklet_unlock(t);
- break;
+ continue;
}
+ tasklet_unlock(t);
}
- }
-}
-
-static void tasklet_action(struct softirq_action *a)
-{
- struct tasklet_struct *list;
- local_irq_disable();
- list = __get_cpu_var(tasklet_vec).head;
- __get_cpu_var(tasklet_vec).head = NULL;
- __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
- local_irq_enable();
-
- __tasklet_action(a, list);
+ local_irq_disable();
+ t->next = NULL;
+ *__this_cpu_read(tasklet_vec.tail) = t;
+ __this_cpu_write(tasklet_vec.tail, &(t->next));
+ __raise_softirq_irqoff(TASKLET_SOFTIRQ);
+ local_irq_enable();
+ }
}
static void tasklet_hi_action(struct softirq_action *a)
@@ -1001,7 +513,29 @@ static void tasklet_hi_action(struct softirq_action *a)
__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
local_irq_enable();
- __tasklet_action(a, list);
+ while (list) {
+ struct tasklet_struct *t = list;
+
+ list = list->next;
+
+ if (tasklet_trylock(t)) {
+ if (!atomic_read(&t->count)) {
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ t->func(t->data);
+ tasklet_unlock(t);
+ continue;
+ }
+ tasklet_unlock(t);
+ }
+
+ local_irq_disable();
+ t->next = NULL;
+ *__this_cpu_read(tasklet_hi_vec.tail) = t;
+ __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+ __raise_softirq_irqoff(HI_SOFTIRQ);
+ local_irq_enable();
+ }
}
@@ -1024,7 +558,7 @@ void tasklet_kill(struct tasklet_struct *t)
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
do {
- msleep(1);
+ yield();
} while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
@@ -1228,26 +762,22 @@ void __init softirq_init(void)
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
-void tasklet_unlock_wait(struct tasklet_struct *t)
+static int ksoftirqd_should_run(unsigned int cpu)
{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
- /*
- * Hack for now to avoid this busy-loop:
- */
-#ifdef CONFIG_PREEMPT_RT_FULL
- msleep(1);
-#else
- barrier();
-#endif
- }
+ return local_softirq_pending();
}
-EXPORT_SYMBOL(tasklet_unlock_wait);
-#endif
-static int ksoftirqd_should_run(unsigned int cpu)
+static void run_ksoftirqd(unsigned int cpu)
{
- return ksoftirqd_softirq_pending();
+ local_irq_disable();
+ if (local_softirq_pending()) {
+ __do_softirq();
+ rcu_note_context_switch(cpu);
+ local_irq_enable();
+ cond_resched();
+ return;
+ }
+ local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -1330,8 +860,6 @@ static struct notifier_block cpu_nfb = {
static struct smp_hotplug_thread softirq_threads = {
.store = &ksoftirqd,
- .setup = ksoftirqd_set_sched_params,
- .cleanup = ksoftirqd_clr_sched_params,
.thread_should_run = ksoftirqd_should_run,
.thread_fn = run_ksoftirqd,
.thread_comm = "ksoftirqd/%u",
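
The restored __do_softirq() loop above is the classic snapshot-and-dispatch scheme: grab the pending bitmask, clear it, then walk the set bits and run each handler. A self-contained userspace sketch of just that loop (illustrative names; none of the kernel semantics such as irq masking or restart limits):

/* Sketch of the pending-bitmask dispatch used by __do_softirq(). */
#include <stdio.h>

#define NR_SOFTIRQS 4

static void (*softirq_vec[NR_SOFTIRQS])(void);
static unsigned int pending_mask;

static void raise_softirq(unsigned int nr)
{
    pending_mask |= 1U << nr;           /* or_softirq_pending() */
}

static void do_softirq(void)
{
    unsigned int pending = pending_mask;    /* snapshot ... */

    pending_mask = 0;                       /* ... then reset, as set_softirq_pending(0) does */
    for (unsigned int nr = 0; pending; nr++, pending >>= 1)
        if (pending & 1)
            softirq_vec[nr]();              /* h->action(h) */
}

static void timer_action(void) { puts("timer softirq ran"); }

int main(void)
{
    softirq_vec[0] = timer_action;
    raise_softirq(0);
    do_softirq();
    return 0;
}
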
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5c76166..4b082b5 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -124,11 +124,8 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
* __[spin|read|write]_lock_bh()
*/
BUILD_LOCK_OPS(spin, raw_spinlock);
-
-#ifndef CONFIG_PREEMPT_RT_FULL
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
-#endif
#endif
@@ -212,8 +209,6 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
-#ifndef CONFIG_PREEMPT_RT_FULL
-
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
@@ -358,8 +353,6 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
-#endif /* !PREEMPT_RT_FULL */
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
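
BUILD_LOCK_OPS above stamps out a family of lock operations per lock type via token pasting. A toy illustration of the same macro technique (toy lock types, not the kernel's; the real macros additionally generate irq- and bh-disabling variants):

/* Sketch: one macro emits a *_lock() function per lock type. */
#include <stdio.h>

typedef struct { int held; } toy_spinlock_t;
typedef struct { int held; } toy_rwlock_t;

#define BUILD_LOCK_OPS(op, locktype)            \
static void toy_##op##_lock(locktype##_t *lock) \
{                                               \
    lock->held = 1;                             \
    printf("%s lock acquired\n", #op);          \
}

BUILD_LOCK_OPS(spin, toy_spinlock)  /* emits toy_spin_lock() */
BUILD_LOCK_OPS(read, toy_rwlock)    /* emits toy_read_lock() */

int main(void)
{
    toy_spinlock_t s = { 0 };
    toy_rwlock_t r = { 0 };

    toy_spin_lock(&s);
    toy_read_lock(&r);
    return 0;
}
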
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 5f02a3f..c09f295 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -29,12 +29,12 @@ struct cpu_stop_done {
atomic_t nr_todo; /* nr left to execute */
bool executed; /* actually executed? */
int ret; /* collected return value */
- struct task_struct *waiter; /* woken when nr_todo reaches 0 */
+ struct completion completion; /* fired if nr_todo reaches 0 */
};
/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
- raw_spinlock_t lock;
+ spinlock_t lock;
bool enabled; /* is this stopper enabled? */
struct list_head works; /* list of pending works */
};
@@ -47,7 +47,7 @@ static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
memset(done, 0, sizeof(*done));
atomic_set(&done->nr_todo, nr_todo);
- done->waiter = current;
+ init_completion(&done->completion);
}
/* signal completion unless @done is NULL */
@@ -56,10 +56,8 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
if (done) {
if (executed)
done->executed = true;
- if (atomic_dec_and_test(&done->nr_todo)) {
- wake_up_process(done->waiter);
- done->waiter = NULL;
- }
+ if (atomic_dec_and_test(&done->nr_todo))
+ complete(&done->completion);
}
}
@@ -71,7 +69,7 @@ static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
unsigned long flags;
- raw_spin_lock_irqsave(&stopper->lock, flags);
+ spin_lock_irqsave(&stopper->lock, flags);
if (stopper->enabled) {
list_add_tail(&work->list, &stopper->works);
@@ -79,23 +77,7 @@ static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
} else
cpu_stop_signal_done(work->done, false);
- raw_spin_unlock_irqrestore(&stopper->lock, flags);
-}
-
-static void wait_for_stop_done(struct cpu_stop_done *done)
-{
- set_current_state(TASK_UNINTERRUPTIBLE);
- while (atomic_read(&done->nr_todo)) {
- schedule();
- set_current_state(TASK_UNINTERRUPTIBLE);
- }
- /*
- * We need to wait until cpu_stop_signal_done() has cleared
- * done->waiter.
- */
- while (done->waiter)
- cpu_relax();
- set_current_state(TASK_RUNNING);
+ spin_unlock_irqrestore(&stopper->lock, flags);
}
/**
@@ -129,7 +111,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
cpu_stop_init_done(&done, 1);
cpu_stop_queue_work(cpu, &work);
- wait_for_stop_done(&done);
+ wait_for_completion(&done.completion);
return done.executed ? done.ret : -ENOENT;
}
@@ -155,12 +137,11 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
-static DEFINE_MUTEX(stopper_lock);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
static void queue_stop_cpus_work(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg,
- struct cpu_stop_done *done, bool inactive)
+ struct cpu_stop_done *done)
{
struct cpu_stop_work *work;
unsigned int cpu;
@@ -174,18 +155,14 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
}
/*
- * Make sure that all work is queued on all cpus before
- * any of the cpus can execute it.
+ * Disable preemption while queueing to avoid getting
+ * preempted by a stopper which might wait for other stoppers
+ * to enter @fn which can lead to deadlock.
*/
- if (!inactive) {
- mutex_lock(&stopper_lock);
- } else {
- while (!mutex_trylock(&stopper_lock))
- cpu_relax();
- }
+ preempt_disable();
for_each_cpu(cpu, cpumask)
cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
- mutex_unlock(&stopper_lock);
+ preempt_enable();
}
static int __stop_cpus(const struct cpumask *cpumask,
@@ -194,8 +171,8 @@ static int __stop_cpus(const struct cpumask *cpumask,
struct cpu_stop_done done;
cpu_stop_init_done(&done, cpumask_weight(cpumask));
- queue_stop_cpus_work(cpumask, fn, arg, &done, false);
- wait_for_stop_done(&done);
+ queue_stop_cpus_work(cpumask, fn, arg, &done);
+ wait_for_completion(&done.completion);
return done.executed ? done.ret : -ENOENT;
}
@@ -274,9 +251,9 @@ static int cpu_stop_should_run(unsigned int cpu)
unsigned long flags;
int run;
- raw_spin_lock_irqsave(&stopper->lock, flags);
+ spin_lock_irqsave(&stopper->lock, flags);
run = !list_empty(&stopper->works);
- raw_spin_unlock_irqrestore(&stopper->lock, flags);
+ spin_unlock_irqrestore(&stopper->lock, flags);
return run;
}
@@ -288,13 +265,13 @@ static void cpu_stopper_thread(unsigned int cpu)
repeat:
work = NULL;
- raw_spin_lock_irq(&stopper->lock);
+ spin_lock_irq(&stopper->lock);
if (!list_empty(&stopper->works)) {
work = list_first_entry(&stopper->works,
struct cpu_stop_work, list);
list_del_init(&work->list);
}
- raw_spin_unlock_irq(&stopper->lock);
+ spin_unlock_irq(&stopper->lock);
if (work) {
cpu_stop_fn_t fn = work->fn;
@@ -302,16 +279,6 @@ repeat:
struct cpu_stop_done *done = work->done;
char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
- /*
- * Wait until the stopper finished scheduling on all
- * cpus
- */
- mutex_lock(&stopper_lock);
- /*
- * Let other cpu threads continue as well
- */
- mutex_unlock(&stopper_lock);
-
/* cpu stop callbacks are not allowed to sleep */
preempt_disable();
@@ -326,13 +293,7 @@ repeat:
kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
ksym_buf), arg);
- /*
- * Make sure that the wakeup and setting done->waiter
- * to NULL is atomic.
- */
- local_irq_disable();
cpu_stop_signal_done(done, true);
- local_irq_enable();
goto repeat;
}
}
@@ -351,20 +312,20 @@ static void cpu_stop_park(unsigned int cpu)
unsigned long flags;
/* drain remaining works */
- raw_spin_lock_irqsave(&stopper->lock, flags);
+ spin_lock_irqsave(&stopper->lock, flags);
list_for_each_entry(work, &stopper->works, list)
cpu_stop_signal_done(work->done, false);
stopper->enabled = false;
- raw_spin_unlock_irqrestore(&stopper->lock, flags);
+ spin_unlock_irqrestore(&stopper->lock, flags);
}
static void cpu_stop_unpark(unsigned int cpu)
{
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
- raw_spin_lock_irq(&stopper->lock);
+ spin_lock_irq(&stopper->lock);
stopper->enabled = true;
- raw_spin_unlock_irq(&stopper->lock);
+ spin_unlock_irq(&stopper->lock);
}
static struct smp_hotplug_thread cpu_stop_threads = {
@@ -386,7 +347,7 @@ static int __init cpu_stop_init(void)
for_each_possible_cpu(cpu) {
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
- raw_spin_lock_init(&stopper->lock);
+ spin_lock_init(&stopper->lock);
INIT_LIST_HEAD(&stopper->works);
}
@@ -569,11 +530,11 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
set_state(&smdata, STOPMACHINE_PREPARE);
cpu_stop_init_done(&done, num_active_cpus());
queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
- &done, true);
+ &done);
ret = stop_machine_cpu_stop(&smdata);
/* Busy wait for completion. */
- while (atomic_read(&done.nr_todo))
+ while (!completion_done(&done.completion))
cpu_relax();
mutex_unlock(&stop_cpus_mutex);
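
The change above replaces an open-coded waiter with a struct completion. In userspace terms a completion is a flag guarded by a mutex/condvar pair; a minimal analogue of the wait/complete handshake (pthread-based sketch, not the kernel implementation):

/* Sketch: completion-style wait, as in stop_one_cpu() above. */
#include <pthread.h>
#include <stdio.h>

struct completion {
    pthread_mutex_t lock;
    pthread_cond_t wait;
    int done;
};

static void init_completion(struct completion *c)
{
    pthread_mutex_init(&c->lock, NULL);
    pthread_cond_init(&c->wait, NULL);
    c->done = 0;
}

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = 1;
    pthread_cond_signal(&c->wait);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)                    /* no busy wait, no lost wakeup */
        pthread_cond_wait(&c->wait, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

static struct completion done;

static void *stopper(void *arg)
{
    puts("stopper work executed");
    complete(&done);                    /* cpu_stop_signal_done() */
    return NULL;
}

int main(void)
{
    pthread_t t;

    init_completion(&done);
    pthread_create(&t, NULL, stopper, NULL);
    wait_for_completion(&done);         /* wait_for_completion(&done.completion) */
    pthread_join(t, NULL);
    return 0;
}
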
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 23d7203..a6a5bf5 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -73,8 +73,7 @@ static struct clocksource clocksource_jiffies = {
.shift = JIFFIES_SHIFT,
};
-__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
-__cacheline_aligned_in_smp seqcount_t jiffies_seq;
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
@@ -83,9 +82,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqcount_begin(&jiffies_seq);
+ seq = read_seqbegin(&jiffies_lock);
ret = jiffies_64;
- } while (read_seqcount_retry(&jiffies_seq, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
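
get_jiffies_64() above shows the seqlock read side: retry whenever the writer's sequence count was odd (write in flight) or moved during the read. A userspace sketch of both sides using C11 atomics; note this is only a sketch, since a production version would also need the protected data itself fenced or accessed atomically:

/* Sketch of a seqlock, as used for jiffies_lock above. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint seq;     /* even: idle, odd: write in progress */
static uint64_t jiffies64;  /* the protected 64-bit value */

static void write_seqlock_update(uint64_t v)
{
    atomic_fetch_add(&seq, 1);  /* -> odd: readers will retry */
    jiffies64 = v;
    atomic_fetch_add(&seq, 1);  /* -> even: value stable again */
}

static uint64_t get_jiffies64(void)
{
    unsigned int s;
    uint64_t ret;

    do {
        s = atomic_load(&seq);                      /* read_seqbegin() */
        ret = jiffies64;
    } while ((s & 1) || atomic_load(&seq) != s);    /* read_seqretry() */
    return ret;
}

int main(void)
{
    write_seqlock_update(42);
    printf("jiffies = %llu\n", (unsigned long long)get_jiffies64());
    return 0;
}
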
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index d6132cd..af8d1d4 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -10,7 +10,6 @@
#include <linux/workqueue.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
-#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/timex.h>
#include <linux/time.h>
@@ -518,49 +517,10 @@ static void sync_cmos_clock(struct work_struct *work)
schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-/*
- * RT can not call schedule_delayed_work from real interrupt context.
- * Need to make a thread to do the real work.
- */
-static struct task_struct *cmos_delay_thread;
-static bool do_cmos_delay;
-
-static int run_cmos_delay(void *ignore)
-{
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (do_cmos_delay) {
- do_cmos_delay = false;
- schedule_delayed_work(&sync_cmos_work, 0);
- }
- schedule();
- }
- __set_current_state(TASK_RUNNING);
- return 0;
-}
-
-void ntp_notify_cmos_timer(void)
-{
- do_cmos_delay = true;
- /* Make visible before waking up process */
- smp_wmb();
- wake_up_process(cmos_delay_thread);
-}
-
-static __init int create_cmos_delay_thread(void)
-{
- cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd");
- BUG_ON(!cmos_delay_thread);
- return 0;
-}
-early_initcall(create_cmos_delay_thread);
-#else
void ntp_notify_cmos_timer(void)
{
schedule_delayed_work(&sync_cmos_work, 0);
}
-#endif /* CONFIG_PREEMPT_RT_FULL */
#else
void ntp_notify_cmos_timer(void) { }
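
The removed kcmosdelayd code above hands work that cannot be started from a restricted context to a helper thread: set a flag, order the store before the wakeup, wake the thread. A userspace analogue using a semaphore, where sem_post() is async-signal-safe and plays the role of the irq-safe wake_up_process() (names illustrative):

/* Sketch: kick a helper thread from a restricted context. */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t kick;
static atomic_bool do_delay;

static void *delay_thread(void *arg)    /* the kcmosdelayd stand-in */
{
    sem_wait(&kick);
    if (atomic_exchange(&do_delay, false))
        puts("running deferred work in thread context");
    return NULL;
}

static void notify_from_restricted_ctx(void)
{
    atomic_store(&do_delay, true);  /* flag is visible before ... */
    sem_post(&kick);                /* ... the wakeup, like smp_wmb() */
}

int main(void)
{
    pthread_t t;

    sem_init(&kick, 0, 0);
    pthread_create(&t, NULL, delay_thread, NULL);
    notify_from_restricted_ctx();
    pthread_join(t, NULL);
    return 0;
}
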
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 1b80eb0..64522ec 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -63,15 +63,13 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- raw_spin_lock(&jiffies_lock);
- write_seqcount_begin(&jiffies_seq);
+ write_seqlock(&jiffies_lock);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_seqcount_end(&jiffies_seq);
- raw_spin_unlock(&jiffies_lock);
+ write_sequnlock(&jiffies_lock);
}
update_process_times(user_mode(get_irq_regs()));
@@ -132,9 +130,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
- seq = read_seqcount_begin(&jiffies_seq);
+ seq = read_seqbegin(&jiffies_lock);
next = tick_next_period;
- } while (read_seqcount_retry(&jiffies_seq, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 7e5e7f8..bc906ca 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -4,8 +4,7 @@
#include <linux/hrtimer.h>
#include <linux/tick.h>
-extern raw_spinlock_t jiffies_lock;
-extern seqcount_t jiffies_seq;
+extern seqlock_t jiffies_lock;
#define CS_NAME_LEN 32
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3740f28..ea20f7d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -62,8 +62,7 @@ static void tick_do_update_jiffies64(ktime_t now)
return;
/* Reevalute with jiffies_lock held */
- raw_spin_lock(&jiffies_lock);
- write_seqcount_begin(&jiffies_seq);
+ write_seqlock(&jiffies_lock);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
@@ -86,8 +85,7 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
- write_seqcount_end(&jiffies_seq);
- raw_spin_unlock(&jiffies_lock);
+ write_sequnlock(&jiffies_lock);
}
/*
@@ -97,14 +95,12 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
- raw_spin_lock(&jiffies_lock);
- write_seqcount_begin(&jiffies_seq);
+ write_seqlock(&jiffies_lock);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_seqcount_end(&jiffies_seq);
- raw_spin_unlock(&jiffies_lock);
+ write_sequnlock(&jiffies_lock);
return period;
}
@@ -221,7 +217,6 @@ static void nohz_full_kick_work_func(struct irq_work *work)
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_work_func,
- .flags = IRQ_WORK_HARD_IRQ,
};
/*
@@ -543,11 +538,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqcount_begin(&jiffies_seq);
+ seq = read_seqbegin(&jiffies_lock);
last_update = last_jiffies_update;
last_jiffies = jiffies;
time_delta = timekeeping_max_deferment();
- } while (read_seqcount_retry(&jiffies_seq, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
@@ -725,7 +720,14 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
- softirq_check_pending_idle();
+ static int ratelimit;
+
+ if (ratelimit < 10 &&
+ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
+ pr_warn("NOHZ: local_softirq_pending %02x\n",
+ (unsigned int) local_softirq_pending());
+ ratelimit++;
+ }
return false;
}
@@ -1110,7 +1112,6 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ts->sched_timer.irqsafe = 1;
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per cpu) */
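
The ratelimit counter restored in can_stop_idle_tick() above is a common idiom: warn at most N times from a path that may fire on every idle entry, then stay silent so the log is not flooded. In isolation:

/* Sketch of the static-counter ratelimit idiom. */
#include <stdio.h>

static void warn_pending(unsigned int pending)
{
    static int ratelimit;

    if (ratelimit < 10 && pending) {
        fprintf(stderr, "NOHZ: local_softirq_pending %02x\n", pending);
        ratelimit++;
    }
}

int main(void)
{
    for (int i = 0; i < 20; i++)
        warn_pending(0x02);     /* prints only the first 10 times */
    return 0;
}
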
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d3150a7..bfca770 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1754,9 +1754,7 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
- raw_spin_lock(&jiffies_lock);
- write_seqcount_begin(&jiffies_seq);
+ write_seqlock(&jiffies_lock);
do_timer(ticks);
- write_seqcount_end(&jiffies_seq);
- raw_spin_unlock(&jiffies_lock);
+ write_sequnlock(&jiffies_lock);
}
diff --git a/kernel/timer.c b/kernel/timer.c
index cc34e42..4296d13 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -78,9 +78,6 @@ struct tvec_root {
struct tvec_base {
spinlock_t lock;
struct timer_list *running_timer;
-#ifdef CONFIG_PREEMPT_RT_FULL
- wait_queue_head_t wait_for_running_timer;
-#endif
unsigned long timer_jiffies;
unsigned long next_timer;
unsigned long active_timers;
@@ -723,36 +720,6 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
}
}
-#ifndef CONFIG_PREEMPT_RT_FULL
-static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
- struct tvec_base *old,
- struct tvec_base *new)
-{
- /* See the comment in lock_timer_base() */
- timer_set_base(timer, NULL);
- spin_unlock(&old->lock);
- spin_lock(&new->lock);
- timer_set_base(timer, new);
- return new;
-}
-#else
-static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
- struct tvec_base *old,
- struct tvec_base *new)
-{
- /*
- * We cannot do the above because we might be preempted and
- * then the preempter would see NULL and loop forever.
- */
- if (spin_trylock(&new->lock)) {
- timer_set_base(timer, new);
- spin_unlock(&old->lock);
- return new;
- }
- return old;
-}
-#endif
-
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
bool pending_only, int pinned)
@@ -772,15 +739,12 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
debug_activate(timer, expires);
- preempt_disable_rt();
cpu = smp_processor_id();
#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
cpu = get_nohz_timer_target();
#endif
- preempt_enable_rt();
-
new_base = per_cpu(tvec_bases, cpu);
if (base != new_base) {
@@ -791,8 +755,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
* handler yet has not finished. This also guarantees that
* the timer is serialized wrt itself.
*/
- if (likely(base->running_timer != timer))
- base = switch_timer_base(timer, base, new_base);
+ if (likely(base->running_timer != timer)) {
+ /* See the comment in lock_timer_base() */
+ timer_set_base(timer, NULL);
+ spin_unlock(&base->lock);
+ base = new_base;
+ spin_lock(&base->lock);
+ timer_set_base(timer, base);
+ }
}
timer->expires = expires;
@@ -975,29 +945,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
}
EXPORT_SYMBOL_GPL(add_timer_on);
-#ifdef CONFIG_PREEMPT_RT_FULL
-/*
- * Wait for a running timer
- */
-static void wait_for_running_timer(struct timer_list *timer)
-{
- struct tvec_base *base = timer->base;
-
- if (base->running_timer == timer)
- wait_event(base->wait_for_running_timer,
- base->running_timer != timer);
-}
-
-# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
-#else
-static inline void wait_for_running_timer(struct timer_list *timer)
-{
- cpu_relax();
-}
-
-# define wakeup_timer_waiters(b) do { } while (0)
-#endif
-
/**
* del_timer - deactive a timer.
* @timer: the timer to be deactivated
@@ -1055,7 +1002,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
}
EXPORT_SYMBOL(try_to_del_timer_sync);
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_SMP
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
@@ -1115,7 +1062,7 @@ int del_timer_sync(struct timer_list *timer)
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
- wait_for_running_timer(timer);
+ cpu_relax();
}
}
EXPORT_SYMBOL(del_timer_sync);
@@ -1232,17 +1179,15 @@ static inline void __run_timers(struct tvec_base *base)
if (irqsafe) {
spin_unlock(&base->lock);
call_timer_fn(timer, fn, data);
- base->running_timer = NULL;
spin_lock(&base->lock);
} else {
spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn, data);
- base->running_timer = NULL;
spin_lock_irq(&base->lock);
}
}
}
- wakeup_timer_waiters(base);
+ base->running_timer = NULL;
spin_unlock_irq(&base->lock);
}
@@ -1382,31 +1327,17 @@ unsigned long get_next_timer_interrupt(unsigned long now)
if (cpu_is_offline(smp_processor_id()))
return expires;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /*
- * On PREEMPT_RT we cannot sleep here. If the trylock does not
- * succeed then we return the worst-case 'expires in 1 tick'
- * value. We use the rt functions here directly to avoid a
- * migrate_disable() call.
- */
- if (!spin_do_trylock(&base->lock))
- return now + 1;
-#else
spin_lock(&base->lock);
-#endif
if (base->active_timers) {
if (time_before_eq(base->next_timer, base->timer_jiffies))
base->next_timer = __next_timer_interrupt(base);
expires = base->next_timer;
}
-#ifdef CONFIG_PREEMPT_RT_FULL
- rt_spin_unlock_after_trylock_in_irq(&base->lock);
-#else
spin_unlock(&base->lock);
-#endif
if (time_before_eq(expires, now))
return now;
+
return cmp_next_hrtimer_event(now, expires);
}
#endif
@@ -1422,13 +1353,13 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
- scheduler_tick();
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
-#if defined(CONFIG_IRQ_WORK)
+#ifdef CONFIG_IRQ_WORK
if (in_irq())
irq_work_run();
#endif
+ scheduler_tick();
run_posix_cpu_timers(p);
}
@@ -1439,9 +1370,7 @@ static void run_timer_softirq(struct softirq_action *h)
{
struct tvec_base *base = __this_cpu_read(tvec_bases);
-#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
- irq_work_run();
-#endif
+ hrtimer_run_pending();
if (time_after_eq(jiffies, base->timer_jiffies))
__run_timers(base);
@@ -1452,39 +1381,8 @@ static void run_timer_softirq(struct softirq_action *h)
*/
void run_local_timers(void)
{
- struct tvec_base *base = __this_cpu_read(tvec_bases);
-
hrtimer_run_queues();
- /*
- * We can access this lockless as we are in the timer
- * interrupt. If there are no timers queued, nothing to do in
- * the timer softirq.
- */
-#ifdef CONFIG_PREEMPT_RT_FULL
- /* On RT, irq work runs from softirq */
- if (irq_work_needs_cpu()) {
- raise_softirq(TIMER_SOFTIRQ);
- return;
- }
-
- if (!spin_do_trylock(&base->lock)) {
- raise_softirq(TIMER_SOFTIRQ);
- return;
- }
-#endif
-
- if (!base->active_timers)
- goto out;
-
- /* Check whether the next pending timer has expired */
- if (time_before_eq(base->next_timer, jiffies))
- raise_softirq(TIMER_SOFTIRQ);
-out:
-#ifdef CONFIG_PREEMPT_RT_FULL
- rt_spin_unlock_after_trylock_in_irq(&base->lock);
-#endif
- /* The ; ensures that gcc won't complain in the !RT case */
- ;
+ raise_softirq(TIMER_SOFTIRQ);
}
#ifdef __ARCH_WANT_SYS_ALARM
@@ -1649,9 +1547,6 @@ static int init_timers_cpu(int cpu)
base = per_cpu(tvec_bases, cpu);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
- init_waitqueue_head(&base->wait_for_running_timer);
-#endif
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
@@ -1690,7 +1585,7 @@ static void migrate_timers(int cpu)
BUG_ON(cpu_online(cpu));
old_base = per_cpu(tvec_bases, cpu);
- new_base = get_local_var(tvec_bases);
+ new_base = get_cpu_var(tvec_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, so deadlock is not possible.
@@ -1711,7 +1606,7 @@ static void migrate_timers(int cpu)
spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
- put_local_var(tvec_bases);
+ put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
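
The removed RT switch_timer_base() above avoids spinning on a lock whose holder may be preempted: it only tries the new base's lock while still holding the old one, and stays on the old base on failure. A userspace sketch of that trylock-or-stay pattern (illustrative names):

/* Sketch: migrate between lock-protected bases without blocking. */
#include <pthread.h>
#include <stdio.h>

struct base { pthread_mutex_t lock; };

/* Returns the base whose lock is held afterwards. */
static struct base *switch_base(struct base *old, struct base *new)
{
    if (pthread_mutex_trylock(&new->lock) == 0) {
        pthread_mutex_unlock(&old->lock);
        return new;             /* migrated */
    }
    return old;                 /* contended: stay put, retry later */
}

int main(void)
{
    struct base a = { PTHREAD_MUTEX_INITIALIZER };
    struct base b = { PTHREAD_MUTEX_INITIALIZER };
    struct base *cur;

    pthread_mutex_lock(&a.lock);
    cur = switch_base(&a, &b);
    printf("now holding the %s base\n", cur == &b ? "new" : "old");
    pthread_mutex_unlock(&cur->lock);
    return 0;
}
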
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index bbe95b9..015f85a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -192,24 +192,6 @@ config IRQSOFF_TRACER
enabled. This option and the preempt-off timing option can be
used together or separately.)
-config INTERRUPT_OFF_HIST
- bool "Interrupts-off Latency Histogram"
- depends on IRQSOFF_TRACER
- help
- This option generates continuously updated histograms (one per cpu)
- of the duration of time periods with interrupts disabled. The
- histograms are disabled by default. To enable them, write a non-zero
- number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
-
- If PREEMPT_OFF_HIST is also selected, additional histograms (one
- per cpu) are generated that accumulate the duration of time periods
- when both interrupts and preemption are disabled. The histogram data
- will be located in the debug file system at
-
- /sys/kernel/debug/tracing/latency_hist/irqsoff
-
config PREEMPT_TRACER
bool "Preemption-off Latency Tracer"
default n
@@ -234,24 +216,6 @@ config PREEMPT_TRACER
enabled. This option and the irqs-off timing option can be
used together or separately.)
-config PREEMPT_OFF_HIST
- bool "Preemption-off Latency Histogram"
- depends on PREEMPT_TRACER
- help
- This option generates continuously updated histograms (one per cpu)
- of the duration of time periods with preemption disabled. The
- histograms are disabled by default. To enable them, write a non-zero
- number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
-
- If INTERRUPT_OFF_HIST is also selected, additional histograms (one
- per cpu) are generated that accumulate the duration of time periods
- when both interrupts and preemption are disabled. The histogram data
- will be located in the debug file system at
-
- /sys/kernel/debug/tracing/latency_hist/preemptoff
-
config SCHED_TRACER
bool "Scheduling Latency Tracer"
select GENERIC_TRACER
@@ -262,74 +226,6 @@ config SCHED_TRACER
This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up.
-config WAKEUP_LATENCY_HIST
- bool "Scheduling Latency Histogram"
- depends on SCHED_TRACER
- help
- This option generates continuously updated histograms (one per cpu)
- of the scheduling latency of the highest priority task.
- The histograms are disabled by default. To enable them, write a
- non-zero number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/wakeup
-
- Two different algorithms are used, one to determine the latency of
- processes that exclusively use the highest priority of the system and
- another one to determine the latency of processes that share the
- highest system priority with other processes. The former is used to
- improve hardware and system software, the latter to optimize the
- priority design of a given system. The histogram data will be
- located in the debug file system at
-
- /sys/kernel/debug/tracing/latency_hist/wakeup
-
- and
-
- /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
-
- If both Scheduling Latency Histogram and Missed Timer Offsets
- Histogram are selected, additional histogram data will be collected
- that contain, in addition to the wakeup latency, the timer latency, in
- case the wakeup was triggered by an expired timer. These histograms
- are available in the
-
- /sys/kernel/debug/tracing/latency_hist/timerandwakeup
-
- directory. They reflect the apparent interrupt and scheduling latency
- and are best suited to determining the worst-case latency of a given
- system. To enable these histograms, write a non-zero number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
-
-config MISSED_TIMER_OFFSETS_HIST
- depends on HIGH_RES_TIMERS
- select GENERIC_TRACER
- bool "Missed Timer Offsets Histogram"
- help
- Generate a histogram of missed timer offsets in microseconds. The
- histograms are disabled by default. To enable them, write a non-zero
- number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
-
- The histogram data will be located in the debug file system at
-
- /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
-
- If both Scheduling Latency Histogram and Missed Timer Offsets
- Histogram are selected, additional histogram data will be collected
- that contain, in addition to the wakeup latency, the timer latency, in
- case the wakeup was triggered by an expired timer. These histograms
- are available in the
-
- /sys/kernel/debug/tracing/latency_hist/timerandwakeup
-
- directory. They reflect the apparent interrupt and scheduling latency
- and are best suited to determining the worst-case latency of a given
- system. To enable these histograms, write a non-zero number to
-
- /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
-
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
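
Per the help texts above, the (now removed) histograms were enabled by writing a non-zero number to a debugfs file. A small sketch of doing that from C; the path is taken from the help text and exists only on kernels carrying these patches with debugfs mounted:

/* Sketch: enable the wakeup latency histogram via debugfs. */
#include <stdio.h>

int main(void)
{
    const char *path =
        "/sys/kernel/debug/tracing/latency_hist/enable/wakeup";
    FILE *f = fopen(path, "w");

    if (!f) {
        perror("fopen");    /* kernel without the histogram patches */
        return 1;
    }
    fputs("1\n", f);        /* any non-zero value enables it */
    fclose(f);
    return 0;
}
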
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index f5e0243..d7e2068 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -34,10 +34,6 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
-obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
-obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
-obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
-obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
deleted file mode 100644
index 66a69eb..0000000
--- a/kernel/trace/latency_hist.c
+++ /dev/null
@@ -1,1178 +0,0 @@
-/*
- * kernel/trace/latency_hist.c
- *
- * Add support for histograms of preemption-off latency and
- * interrupt-off latency and wakeup latency; it depends on
- * Real-Time Preemption Support.
- *
- * Copyright (C) 2005 MontaVista Software, Inc.
- * Yi Yang <yyang@ch.mvista.com>
- *
- * Converted to work with the new latency tracer.
- * Copyright (C) 2008 Red Hat, Inc.
- * Steven Rostedt <srostedt@redhat.com>
- *
- */
-#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/percpu.h>
-#include <linux/kallsyms.h>
-#include <linux/uaccess.h>
-#include <linux/sched.h>
-#include <linux/sched/rt.h>
-#include <linux/slab.h>
-#include <linux/atomic.h>
-#include <asm/div64.h>
-
-#include "trace.h"
-#include <trace/events/sched.h>
-
-#define NSECS_PER_USECS 1000L
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/hist.h>
-
-enum {
- IRQSOFF_LATENCY = 0,
- PREEMPTOFF_LATENCY,
- PREEMPTIRQSOFF_LATENCY,
- WAKEUP_LATENCY,
- WAKEUP_LATENCY_SHAREDPRIO,
- MISSED_TIMER_OFFSETS,
- TIMERANDWAKEUP_LATENCY,
- MAX_LATENCY_TYPE,
-};
-
-#define MAX_ENTRY_NUM 10240
-
-struct hist_data {
- atomic_t hist_mode; /* 0 log, 1 don't log */
- long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
- long min_lat;
- long max_lat;
- unsigned long long below_hist_bound_samples;
- unsigned long long above_hist_bound_samples;
- long long accumulate_lat;
- unsigned long long total_samples;
- unsigned long long hist_array[MAX_ENTRY_NUM];
-};
-
-struct enable_data {
- int latency_type;
- int enabled;
-};
-
-static char *latency_hist_dir_root = "latency_hist";
-
-#ifdef CONFIG_INTERRUPT_OFF_HIST
-static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
-static char *irqsoff_hist_dir = "irqsoff";
-static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
-static DEFINE_PER_CPU(int, hist_irqsoff_counting);
-#endif
-
-#ifdef CONFIG_PREEMPT_OFF_HIST
-static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
-static char *preemptoff_hist_dir = "preemptoff";
-static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
-static DEFINE_PER_CPU(int, hist_preemptoff_counting);
-#endif
-
-#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
-static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
-static char *preemptirqsoff_hist_dir = "preemptirqsoff";
-static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
-static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
-#endif
-
-#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
-static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
-static struct enable_data preemptirqsoff_enabled_data = {
- .latency_type = PREEMPTIRQSOFF_LATENCY,
- .enabled = 0,
-};
-#endif
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-struct maxlatproc_data {
- char comm[FIELD_SIZEOF(struct task_struct, comm)];
- char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
- int pid;
- int current_pid;
- int prio;
- int current_prio;
- long latency;
- long timeroffset;
- cycle_t timestamp;
-};
-#endif
-
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
-static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
-static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
-static char *wakeup_latency_hist_dir = "wakeup";
-static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
-static notrace void probe_wakeup_latency_hist_start(void *v,
- struct task_struct *p, int success);
-static notrace void probe_wakeup_latency_hist_stop(void *v,
- struct task_struct *prev, struct task_struct *next);
-static notrace void probe_sched_migrate_task(void *,
- struct task_struct *task, int cpu);
-static struct enable_data wakeup_latency_enabled_data = {
- .latency_type = WAKEUP_LATENCY,
- .enabled = 0,
-};
-static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
-static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
-static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
-static DEFINE_PER_CPU(int, wakeup_sharedprio);
-static unsigned long wakeup_pid;
-#endif
-
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
-static char *missed_timer_offsets_dir = "missed_timer_offsets";
-static notrace void probe_hrtimer_interrupt(void *v, int cpu,
- long long offset, struct task_struct *curr, struct task_struct *task);
-static struct enable_data missed_timer_offsets_enabled_data = {
- .latency_type = MISSED_TIMER_OFFSETS,
- .enabled = 0,
-};
-static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
-static unsigned long missed_timer_offsets_pid;
-#endif
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
-static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
-static struct enable_data timerandwakeup_enabled_data = {
- .latency_type = TIMERANDWAKEUP_LATENCY,
- .enabled = 0,
-};
-static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
-#endif
-
-void notrace latency_hist(int latency_type, int cpu, long latency,
- long timeroffset, cycle_t stop,
- struct task_struct *p)
-{
- struct hist_data *my_hist;
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- struct maxlatproc_data *mp = NULL;
-#endif
-
- if (!cpu_possible(cpu) || latency_type < 0 ||
- latency_type >= MAX_LATENCY_TYPE)
- return;
-
- switch (latency_type) {
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- case IRQSOFF_LATENCY:
- my_hist = &per_cpu(irqsoff_hist, cpu);
- break;
-#endif
-#ifdef CONFIG_PREEMPT_OFF_HIST
- case PREEMPTOFF_LATENCY:
- my_hist = &per_cpu(preemptoff_hist, cpu);
- break;
-#endif
-#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
- case PREEMPTIRQSOFF_LATENCY:
- my_hist = &per_cpu(preemptirqsoff_hist, cpu);
- break;
-#endif
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- case WAKEUP_LATENCY:
- my_hist = &per_cpu(wakeup_latency_hist, cpu);
- mp = &per_cpu(wakeup_maxlatproc, cpu);
- break;
- case WAKEUP_LATENCY_SHAREDPRIO:
- my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
- mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
- break;
-#endif
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- case MISSED_TIMER_OFFSETS:
- my_hist = &per_cpu(missed_timer_offsets, cpu);
- mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
- break;
-#endif
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- case TIMERANDWAKEUP_LATENCY:
- my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
- mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
- break;
-#endif
-
- default:
- return;
- }
-
- latency += my_hist->offset;
-
- if (atomic_read(&my_hist->hist_mode) == 0)
- return;
-
- if (latency < 0 || latency >= MAX_ENTRY_NUM) {
- if (latency < 0)
- my_hist->below_hist_bound_samples++;
- else
- my_hist->above_hist_bound_samples++;
- } else
- my_hist->hist_array[latency]++;
-
- if (unlikely(latency > my_hist->max_lat ||
- my_hist->min_lat == LONG_MAX)) {
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- if (latency_type == WAKEUP_LATENCY ||
- latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
- latency_type == MISSED_TIMER_OFFSETS ||
- latency_type == TIMERANDWAKEUP_LATENCY) {
- strncpy(mp->comm, p->comm, sizeof(mp->comm));
- strncpy(mp->current_comm, current->comm,
- sizeof(mp->current_comm));
- mp->pid = task_pid_nr(p);
- mp->current_pid = task_pid_nr(current);
- mp->prio = p->prio;
- mp->current_prio = current->prio;
- mp->latency = latency;
- mp->timeroffset = timeroffset;
- mp->timestamp = stop;
- }
-#endif
- my_hist->max_lat = latency;
- }
- if (unlikely(latency < my_hist->min_lat))
- my_hist->min_lat = latency;
- my_hist->total_samples++;
- my_hist->accumulate_lat += latency;
-}
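
For orientation, a rough userspace sketch of the bucketing scheme latency_hist() implements above: the per-histogram offset shifts samples so that histograms with a configurable lower bound still index from zero, out-of-range samples only bump the boundary counters, and every sample feeds the min/max/average bookkeeping. All names below are illustrative, not part of the kernel code.

#include <limits.h>
#include <stdio.h>

#define MAX_ENTRY_NUM 10240		/* one bucket per microsecond */

struct hist {
	unsigned long long buckets[MAX_ENTRY_NUM];
	unsigned long long below, above, total;
	long long accumulated;
	long min, max, offset;
};

static void hist_record(struct hist *h, long latency_us)
{
	latency_us += h->offset;	/* shift so the lowest bound maps to bucket 0 */

	if (latency_us < 0)
		h->below++;		/* out of range: count it, don't index */
	else if (latency_us >= MAX_ENTRY_NUM)
		h->above++;
	else
		h->buckets[latency_us]++;

	if (latency_us > h->max || h->min == LONG_MAX)
		h->max = latency_us;
	if (latency_us < h->min)
		h->min = latency_us;
	h->total++;
	h->accumulated += latency_us;
}

int main(void)
{
	static struct hist h = { .min = LONG_MAX, .max = LONG_MIN };

	hist_record(&h, 3);
	hist_record(&h, 17);
	printf("samples=%llu min=%ld max=%ld avg=%lld\n", h.total,
	       h.min - h.offset, h.max - h.offset,
	       h.accumulated / (long long)h.total - h.offset);
	return 0;
}
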
-
-static void *l_start(struct seq_file *m, loff_t *pos)
-{
- loff_t *index_ptr = NULL;
- loff_t index = *pos;
- struct hist_data *my_hist = m->private;
-
- if (index == 0) {
- char minstr[32], avgstr[32], maxstr[32];
-
- atomic_dec(&my_hist->hist_mode);
-
- if (likely(my_hist->total_samples)) {
- long avg = (long) div64_s64(my_hist->accumulate_lat,
- my_hist->total_samples);
- snprintf(minstr, sizeof(minstr), "%ld",
- my_hist->min_lat - my_hist->offset);
- snprintf(avgstr, sizeof(avgstr), "%ld",
- avg - my_hist->offset);
- snprintf(maxstr, sizeof(maxstr), "%ld",
- my_hist->max_lat - my_hist->offset);
- } else {
- strcpy(minstr, "<undef>");
- strcpy(avgstr, minstr);
- strcpy(maxstr, minstr);
- }
-
- seq_printf(m, "#Minimum latency: %s microseconds\n"
- "#Average latency: %s microseconds\n"
- "#Maximum latency: %s microseconds\n"
- "#Total samples: %llu\n"
- "#There are %llu samples lower than %ld"
- " microseconds.\n"
- "#There are %llu samples greater or equal"
- " than %ld microseconds.\n"
- "#usecs\t%16s\n",
- minstr, avgstr, maxstr,
- my_hist->total_samples,
- my_hist->below_hist_bound_samples,
- -my_hist->offset,
- my_hist->above_hist_bound_samples,
- MAX_ENTRY_NUM - my_hist->offset,
- "samples");
- }
- if (index < MAX_ENTRY_NUM) {
- index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
- if (index_ptr)
- *index_ptr = index;
- }
-
- return index_ptr;
-}
-
-static void *l_next(struct seq_file *m, void *p, loff_t *pos)
-{
- loff_t *index_ptr = p;
- struct hist_data *my_hist = m->private;
-
- if (++*pos >= MAX_ENTRY_NUM) {
- atomic_inc(&my_hist->hist_mode);
- return NULL;
- }
- *index_ptr = *pos;
- return index_ptr;
-}
-
-static void l_stop(struct seq_file *m, void *p)
-{
- kfree(p);
-}
-
-static int l_show(struct seq_file *m, void *p)
-{
- int index = *(loff_t *) p;
- struct hist_data *my_hist = m->private;
-
- seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
- my_hist->hist_array[index]);
- return 0;
-}
-
-static const struct seq_operations latency_hist_seq_op = {
- .start = l_start,
- .next = l_next,
- .stop = l_stop,
- .show = l_show
-};
-
-static int latency_hist_open(struct inode *inode, struct file *file)
-{
- int ret;
-
- ret = seq_open(file, &latency_hist_seq_op);
- if (!ret) {
- struct seq_file *seq = file->private_data;
- seq->private = inode->i_private;
- }
- return ret;
-}
-
-static const struct file_operations latency_hist_fops = {
- .open = latency_hist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
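
The four callbacks above follow the seq_file contract: start() positions a heap-allocated cursor (freed in the stop step, exactly as l_stop() does with its kmalloc'd loff_t), show() emits one row, next() advances. A self-contained userspace rendering of how seq_read() drives that contract, with invented names and a toy table standing in for the per-CPU histogram:

#include <stdio.h>
#include <stdlib.h>

static const long table[] = { 5, 12, 42 };
#define NENTRIES ((long long)(sizeof(table) / sizeof(table[0])))

static void *demo_start(long long *pos)
{
	long long *cur;

	if (*pos >= NENTRIES)
		return NULL;
	cur = malloc(sizeof(*cur));	/* freed in the stop step */
	if (cur)
		*cur = *pos;
	return cur;
}

static void *demo_next(void *p, long long *pos)
{
	if (++*pos >= NENTRIES)
		return NULL;
	*(long long *)p = *pos;
	return p;
}

static void demo_show(void *p)
{
	long long i = *(long long *)p;

	printf("%lld\t%ld\n", i, table[i]);
}

int main(void)
{
	long long pos = 0;
	void *p = demo_start(&pos);	/* the start step */

	while (p) {
		demo_show(p);			/* the show step */
		if (!demo_next(p, &pos))	/* the next step */
			break;
	}
	free(p);			/* the stop step */
	return 0;
}
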
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-static void clear_maxlatprocdata(struct maxlatproc_data *mp)
-{
- mp->comm[0] = mp->current_comm[0] = '\0';
- mp->prio = mp->current_prio = mp->pid = mp->current_pid =
- mp->latency = mp->timeroffset = -1;
- mp->timestamp = 0;
-}
-#endif
-
-static void hist_reset(struct hist_data *hist)
-{
- atomic_dec(&hist->hist_mode);
-
- memset(hist->hist_array, 0, sizeof(hist->hist_array));
- hist->below_hist_bound_samples = 0ULL;
- hist->above_hist_bound_samples = 0ULL;
- hist->min_lat = LONG_MAX;
- hist->max_lat = LONG_MIN;
- hist->total_samples = 0ULL;
- hist->accumulate_lat = 0LL;
-
- atomic_inc(&hist->hist_mode);
-}
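
hist_reset() and the l_start()/l_next() pair share one trick: hist_mode acts as a gate, decremented while a reset or a reader is in flight so that latency_hist() drops samples instead of racing with the wipe. A C11-atomics sketch of that gate (the kernel variant tests for exactly zero; the <= 0 test below is the same idea, and all names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int hist_mode = 1;	/* 1: recording enabled */

static bool record_sample(void)
{
	if (atomic_load(&hist_mode) <= 0)
		return false;		/* a reset/read holds the gate: drop */
	/* ... bucket the sample as latency_hist() does ... */
	return true;
}

static void reset_hist(void)
{
	atomic_fetch_sub(&hist_mode, 1);	/* close the gate */
	/* ... zero buckets and counters without racing recorders ... */
	atomic_fetch_add(&hist_mode, 1);	/* reopen */
}

int main(void)
{
	printf("recorded: %d\n", record_sample());
	reset_hist();
	printf("recorded: %d\n", record_sample());
	return 0;
}
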
-
-static ssize_t
-latency_hist_reset(struct file *file, const char __user *a,
- size_t size, loff_t *off)
-{
- int cpu;
- struct hist_data *hist = NULL;
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- struct maxlatproc_data *mp = NULL;
-#endif
- off_t latency_type = (off_t) file->private_data;
-
- for_each_online_cpu(cpu) {
-
- switch (latency_type) {
-#ifdef CONFIG_PREEMPT_OFF_HIST
- case PREEMPTOFF_LATENCY:
- hist = &per_cpu(preemptoff_hist, cpu);
- break;
-#endif
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- case IRQSOFF_LATENCY:
- hist = &per_cpu(irqsoff_hist, cpu);
- break;
-#endif
-#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- case PREEMPTIRQSOFF_LATENCY:
- hist = &per_cpu(preemptirqsoff_hist, cpu);
- break;
-#endif
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- case WAKEUP_LATENCY:
- hist = &per_cpu(wakeup_latency_hist, cpu);
- mp = &per_cpu(wakeup_maxlatproc, cpu);
- break;
- case WAKEUP_LATENCY_SHAREDPRIO:
- hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
- mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
- break;
-#endif
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- case MISSED_TIMER_OFFSETS:
- hist = &per_cpu(missed_timer_offsets, cpu);
- mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
- break;
-#endif
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- case TIMERANDWAKEUP_LATENCY:
- hist = &per_cpu(timerandwakeup_latency_hist, cpu);
- mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
- break;
-#endif
- }
-
- hist_reset(hist);
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- if (latency_type == WAKEUP_LATENCY ||
- latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
- latency_type == MISSED_TIMER_OFFSETS ||
- latency_type == TIMERANDWAKEUP_LATENCY)
- clear_maxlatprocdata(mp);
-#endif
- }
-
- return size;
-}
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-static ssize_t
-show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-{
- char buf[64];
- int r;
- unsigned long *this_pid = file->private_data;
-
- r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t do_pid(struct file *file, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- char buf[64];
- unsigned long pid;
- unsigned long *this_pid = file->private_data;
-
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = '\0';
-
- if (kstrtoul(buf, 10, &pid))
- return -EINVAL;
-
- *this_pid = pid;
-
- return cnt;
-}
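
show_pid()/do_pid() are the standard read/write pair for a single tunable: bound the incoming write, NUL-terminate it, then parse with full error checking. A userspace analogue of the write side, with strtoul() standing in for kstrtoul() and memcpy() for copy_from_user(); names are illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_ulong(const char *ubuf, size_t cnt, unsigned long *out)
{
	char buf[64], *end;

	if (cnt >= sizeof(buf))
		return -EINVAL;		/* reject oversized writes up front */

	memcpy(buf, ubuf, cnt);		/* kernel: copy_from_user() */
	buf[cnt] = '\0';

	errno = 0;
	*out = strtoul(buf, &end, 10);
	if (errno || end == buf)
		return -EINVAL;		/* not a number */
	return 0;
}

int main(void)
{
	unsigned long pid;

	if (!parse_ulong("1234\n", 5, &pid))
		printf("pid=%lu\n", pid);
	return 0;
}
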
-#endif
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-static ssize_t
-show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-{
- int r;
- struct maxlatproc_data *mp = file->private_data;
- int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
- unsigned long long t;
- unsigned long usecs, secs;
- char *buf;
-
- if (mp->pid == -1 || mp->current_pid == -1) {
- buf = "(none)\n";
- return simple_read_from_buffer(ubuf, cnt, ppos, buf,
- strlen(buf));
- }
-
- buf = kmalloc(strmaxlen, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- t = ns2usecs(mp->timestamp);
- usecs = do_div(t, USEC_PER_SEC);
- secs = (unsigned long) t;
- r = snprintf(buf, strmaxlen,
- "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
- MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
- mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
- secs, usecs);
- r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
- kfree(buf);
- return r;
-}
-#endif
-
-static ssize_t
-show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
-{
- char buf[64];
- struct enable_data *ed = file->private_data;
- int r;
-
- r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
-{
- char buf[64];
- unsigned long enable;
- struct enable_data *ed = file->private_data;
-
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
-
- if (kstrtoul(buf, 10, &enable))
- return -EINVAL;
-
- if ((enable && ed->enabled) || (!enable && !ed->enabled))
- return cnt;
-
- if (enable) {
- int ret;
-
- switch (ed->latency_type) {
-#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
- case PREEMPTIRQSOFF_LATENCY:
- ret = register_trace_preemptirqsoff_hist(
- probe_preemptirqsoff_hist, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_preemptirqsoff_hist "
- "to trace_preemptirqsoff_hist\n");
- return ret;
- }
- break;
-#endif
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- case WAKEUP_LATENCY:
- ret = register_trace_sched_wakeup(
- probe_wakeup_latency_hist_start, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_wakeup_latency_hist_start "
- "to trace_sched_wakeup\n");
- return ret;
- }
- ret = register_trace_sched_wakeup_new(
- probe_wakeup_latency_hist_start, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_wakeup_latency_hist_start "
- "to trace_sched_wakeup_new\n");
- unregister_trace_sched_wakeup(
- probe_wakeup_latency_hist_start, NULL);
- return ret;
- }
- ret = register_trace_sched_switch(
- probe_wakeup_latency_hist_stop, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_wakeup_latency_hist_stop "
- "to trace_sched_switch\n");
- unregister_trace_sched_wakeup(
- probe_wakeup_latency_hist_start, NULL);
- unregister_trace_sched_wakeup_new(
- probe_wakeup_latency_hist_start, NULL);
- return ret;
- }
- ret = register_trace_sched_migrate_task(
- probe_sched_migrate_task, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_sched_migrate_task "
- "to trace_sched_migrate_task\n");
- unregister_trace_sched_wakeup(
- probe_wakeup_latency_hist_start, NULL);
- unregister_trace_sched_wakeup_new(
- probe_wakeup_latency_hist_start, NULL);
- unregister_trace_sched_switch(
- probe_wakeup_latency_hist_stop, NULL);
- return ret;
- }
- break;
-#endif
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- case MISSED_TIMER_OFFSETS:
- ret = register_trace_hrtimer_interrupt(
- probe_hrtimer_interrupt, NULL);
- if (ret) {
- pr_info("wakeup trace: Couldn't assign "
- "probe_hrtimer_interrupt "
- "to trace_hrtimer_interrupt\n");
- return ret;
- }
- break;
-#endif
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- case TIMERANDWAKEUP_LATENCY:
- if (!wakeup_latency_enabled_data.enabled ||
- !missed_timer_offsets_enabled_data.enabled)
- return -EINVAL;
- break;
-#endif
- default:
- break;
- }
- } else {
- switch (ed->latency_type) {
-#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
- case PREEMPTIRQSOFF_LATENCY:
- {
- int cpu;
-
- unregister_trace_preemptirqsoff_hist(
- probe_preemptirqsoff_hist, NULL);
- for_each_online_cpu(cpu) {
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- per_cpu(hist_irqsoff_counting,
- cpu) = 0;
-#endif
-#ifdef CONFIG_PREEMPT_OFF_HIST
- per_cpu(hist_preemptoff_counting,
- cpu) = 0;
-#endif
-#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- per_cpu(hist_preemptirqsoff_counting,
- cpu) = 0;
-#endif
- }
- }
- break;
-#endif
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- case WAKEUP_LATENCY:
- {
- int cpu;
-
- unregister_trace_sched_wakeup(
- probe_wakeup_latency_hist_start, NULL);
- unregister_trace_sched_wakeup_new(
- probe_wakeup_latency_hist_start, NULL);
- unregister_trace_sched_switch(
- probe_wakeup_latency_hist_stop, NULL);
- unregister_trace_sched_migrate_task(
- probe_sched_migrate_task, NULL);
-
- for_each_online_cpu(cpu) {
- per_cpu(wakeup_task, cpu) = NULL;
- per_cpu(wakeup_sharedprio, cpu) = 0;
- }
- }
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- timerandwakeup_enabled_data.enabled = 0;
-#endif
- break;
-#endif
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- case MISSED_TIMER_OFFSETS:
- unregister_trace_hrtimer_interrupt(
- probe_hrtimer_interrupt, NULL);
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- timerandwakeup_enabled_data.enabled = 0;
-#endif
- break;
-#endif
- default:
- break;
- }
- }
- ed->enabled = enable;
- return cnt;
-}
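
Note the unwind discipline in the WAKEUP_LATENCY enable path above: when one tracepoint registration fails, everything registered so far is unregistered before the error is returned, so a half-enabled state never leaks. A generic userspace sketch of that pattern; the hook names are invented:

#include <stdio.h>

static int reg_a(void) { return 0; }
static void unreg_a(void) { }
static int reg_b(void) { return 0; }
static void unreg_b(void) { }
static int reg_c(void) { return -1; }	/* simulate a failing registration */
static void unreg_c(void) { }

static const struct hook {
	int (*reg)(void);
	void (*unreg)(void);
} hooks[] = {
	{ reg_a, unreg_a }, { reg_b, unreg_b }, { reg_c, unreg_c },
};

static int enable_all(void)
{
	int n = (int)(sizeof(hooks) / sizeof(hooks[0]));
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = hooks[i].reg();
		if (ret) {
			while (--i >= 0)	/* unwind in reverse order */
				hooks[i].unreg();
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	printf("enable_all: %d\n", enable_all());	/* prints -1 here */
	return 0;
}
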
-
-static const struct file_operations latency_hist_reset_fops = {
- .open = tracing_open_generic,
- .write = latency_hist_reset,
-};
-
-static const struct file_operations enable_fops = {
- .open = tracing_open_generic,
- .read = show_enable,
- .write = do_enable,
-};
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-static const struct file_operations pid_fops = {
- .open = tracing_open_generic,
- .read = show_pid,
- .write = do_pid,
-};
-
-static const struct file_operations maxlatproc_fops = {
- .open = tracing_open_generic,
- .read = show_maxlatproc,
-};
-#endif
-
-#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-static notrace void probe_preemptirqsoff_hist(void *v, int reason,
- int starthist)
-{
- int cpu = raw_smp_processor_id();
- int time_set = 0;
-
- if (starthist) {
- cycle_t uninitialized_var(start);
-
- if (!preempt_count() && !irqs_disabled())
- return;
-
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- if ((reason == IRQS_OFF || reason == TRACE_START) &&
- !per_cpu(hist_irqsoff_counting, cpu)) {
- per_cpu(hist_irqsoff_counting, cpu) = 1;
- start = ftrace_now(cpu);
- time_set++;
- per_cpu(hist_irqsoff_start, cpu) = start;
- }
-#endif
-
-#ifdef CONFIG_PREEMPT_OFF_HIST
- if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
- !per_cpu(hist_preemptoff_counting, cpu)) {
- per_cpu(hist_preemptoff_counting, cpu) = 1;
- if (!(time_set++))
- start = ftrace_now(cpu);
- per_cpu(hist_preemptoff_start, cpu) = start;
- }
-#endif
-
-#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- if (per_cpu(hist_irqsoff_counting, cpu) &&
- per_cpu(hist_preemptoff_counting, cpu) &&
- !per_cpu(hist_preemptirqsoff_counting, cpu)) {
- per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
- if (!time_set)
- start = ftrace_now(cpu);
- per_cpu(hist_preemptirqsoff_start, cpu) = start;
- }
-#endif
- } else {
- cycle_t uninitialized_var(stop);
-
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- if ((reason == IRQS_ON || reason == TRACE_STOP) &&
- per_cpu(hist_irqsoff_counting, cpu)) {
- cycle_t start = per_cpu(hist_irqsoff_start, cpu);
-
- stop = ftrace_now(cpu);
- time_set++;
- if (start) {
- long latency = ((long) (stop - start)) /
- NSECS_PER_USECS;
-
- latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
- stop, NULL);
- }
- per_cpu(hist_irqsoff_counting, cpu) = 0;
- }
-#endif
-
-#ifdef CONFIG_PREEMPT_OFF_HIST
- if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
- per_cpu(hist_preemptoff_counting, cpu)) {
- cycle_t start = per_cpu(hist_preemptoff_start, cpu);
-
- if (!(time_set++))
- stop = ftrace_now(cpu);
- if (start) {
- long latency = ((long) (stop - start)) /
- NSECS_PER_USECS;
-
- latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
- 0, stop, NULL);
- }
- per_cpu(hist_preemptoff_counting, cpu) = 0;
- }
-#endif
-
-#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- if ((!per_cpu(hist_irqsoff_counting, cpu) ||
- !per_cpu(hist_preemptoff_counting, cpu)) &&
- per_cpu(hist_preemptirqsoff_counting, cpu)) {
- cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
-
- if (!time_set)
- stop = ftrace_now(cpu);
- if (start) {
- long latency = ((long) (stop - start)) /
- NSECS_PER_USECS;
-
- latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
- latency, 0, stop, NULL);
- }
- per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
- }
-#endif
- }
-}
-#endif
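
probe_preemptirqsoff_hist() reads the clock at most once per event: the time_set counter lets the irqsoff, preemptoff and combined sections share a single ftrace_now() result when they start or stop together. The same idiom in userspace, with clock_gettime() as the clock and illustrative names:

#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	int time_set = 0;
	long long stamp = 0;
	long long start_a, start_b;

	/* first section that needs a timestamp pays for the clock read */
	if (!time_set++)
		stamp = now_ns();
	start_a = stamp;

	/* a second section started by the same event reuses the reading */
	if (!time_set++)
		stamp = now_ns();
	start_b = stamp;

	printf("%lld %lld (equal: %d)\n", start_a, start_b,
	       start_a == start_b);
	return 0;
}
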
-
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
-static DEFINE_RAW_SPINLOCK(wakeup_lock);
-static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
- int cpu)
-{
- int old_cpu = task_cpu(task);
-
- if (cpu != old_cpu) {
- unsigned long flags;
- struct task_struct *cpu_wakeup_task;
-
- raw_spin_lock_irqsave(&wakeup_lock, flags);
-
- cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
- if (task == cpu_wakeup_task) {
- put_task_struct(cpu_wakeup_task);
- per_cpu(wakeup_task, old_cpu) = NULL;
- cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
- get_task_struct(cpu_wakeup_task);
- }
-
- raw_spin_unlock_irqrestore(&wakeup_lock, flags);
- }
-}
-
-static notrace void probe_wakeup_latency_hist_start(void *v,
- struct task_struct *p, int success)
-{
- unsigned long flags;
- struct task_struct *curr = current;
- int cpu = task_cpu(p);
- struct task_struct *cpu_wakeup_task;
-
- raw_spin_lock_irqsave(&wakeup_lock, flags);
-
- cpu_wakeup_task = per_cpu(wakeup_task, cpu);
-
- if (wakeup_pid) {
- if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
- p->prio == curr->prio)
- per_cpu(wakeup_sharedprio, cpu) = 1;
- if (likely(wakeup_pid != task_pid_nr(p)))
- goto out;
- } else {
- if (likely(!rt_task(p)) ||
- (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
- p->prio > curr->prio)
- goto out;
- if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
- p->prio == curr->prio)
- per_cpu(wakeup_sharedprio, cpu) = 1;
- }
-
- if (cpu_wakeup_task)
- put_task_struct(cpu_wakeup_task);
- cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
- get_task_struct(cpu_wakeup_task);
- cpu_wakeup_task->preempt_timestamp_hist =
- ftrace_now(raw_smp_processor_id());
-out:
- raw_spin_unlock_irqrestore(&wakeup_lock, flags);
-}
-
-static notrace void probe_wakeup_latency_hist_stop(void *v,
- struct task_struct *prev, struct task_struct *next)
-{
- unsigned long flags;
- int cpu = task_cpu(next);
- long latency;
- cycle_t stop;
- struct task_struct *cpu_wakeup_task;
-
- raw_spin_lock_irqsave(&wakeup_lock, flags);
-
- cpu_wakeup_task = per_cpu(wakeup_task, cpu);
-
- if (cpu_wakeup_task == NULL)
- goto out;
-
- /* Already running? */
- if (unlikely(current == cpu_wakeup_task))
- goto out_reset;
-
- if (next != cpu_wakeup_task) {
- if (next->prio < cpu_wakeup_task->prio)
- goto out_reset;
-
- if (next->prio == cpu_wakeup_task->prio)
- per_cpu(wakeup_sharedprio, cpu) = 1;
-
- goto out;
- }
-
- if (current->prio == cpu_wakeup_task->prio)
- per_cpu(wakeup_sharedprio, cpu) = 1;
-
- /*
- * The task we are waiting for is about to be switched to.
- * Calculate the latency and store it in the histogram.
- */
- stop = ftrace_now(raw_smp_processor_id());
-
- latency = ((long) (stop - next->preempt_timestamp_hist)) /
- NSECS_PER_USECS;
-
- if (per_cpu(wakeup_sharedprio, cpu)) {
- latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
- next);
- per_cpu(wakeup_sharedprio, cpu) = 0;
- } else {
- latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- if (timerandwakeup_enabled_data.enabled) {
- latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
- next->timer_offset + latency, next->timer_offset,
- stop, next);
- }
-#endif
- }
-
-out_reset:
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- next->timer_offset = 0;
-#endif
- put_task_struct(cpu_wakeup_task);
- per_cpu(wakeup_task, cpu) = NULL;
-out:
- raw_spin_unlock_irqrestore(&wakeup_lock, flags);
-}
-#endif
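
The wakeup probes above stamp a task when it is woken and compute the delta when the scheduler actually switches it in. Without scheduler hooks, userspace can only approximate that, but the shape of the measurement is the same: stamp just before the wake, stamp again as the first action of the woken thread. A rough pthread sketch (compile with -pthread; names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;
static long long wake_stamp_ns;

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void *waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!ready)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("wakeup latency: %lld us\n",
	       (now_ns() - wake_stamp_ns) / 1000);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	pthread_mutex_lock(&lock);
	ready = 1;
	wake_stamp_ns = now_ns();	/* preempt_timestamp_hist equivalent */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
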
-
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-static notrace void probe_hrtimer_interrupt(void *v, int cpu,
- long long latency_ns, struct task_struct *curr,
- struct task_struct *task)
-{
- if (latency_ns <= 0 && task != NULL && rt_task(task) &&
- (task->prio < curr->prio ||
- (task->prio == curr->prio &&
- !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
- long latency;
- cycle_t now;
-
- if (missed_timer_offsets_pid) {
- if (likely(missed_timer_offsets_pid !=
- task_pid_nr(task)))
- return;
- }
-
- now = ftrace_now(cpu);
- latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
- latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
- task);
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- task->timer_offset = latency;
-#endif
- }
-}
-#endif
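
probe_hrtimer_interrupt() records how late an expired timer fired: latency_ns is negative when the timer is overdue, so -latency_ns, scaled to microseconds, is the missed offset. Userspace can estimate the same quantity by comparing the requested and actual wakeup times of an absolute sleep; a minimal sketch:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec target, now;
	long long offset_ns;

	clock_gettime(CLOCK_MONOTONIC, &target);
	target.tv_nsec += 1000000;		/* ask to wake 1 ms from now */
	if (target.tv_nsec >= 1000000000L) {
		target.tv_sec++;
		target.tv_nsec -= 1000000000L;
	}
	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &target, NULL);

	clock_gettime(CLOCK_MONOTONIC, &now);
	offset_ns = (now.tv_sec - target.tv_sec) * 1000000000LL +
		    (now.tv_nsec - target.tv_nsec);
	printf("timer offset: %lld us\n", offset_ns / 1000);
	return 0;
}
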
-
-static __init int latency_hist_init(void)
-{
- struct dentry *latency_hist_root = NULL;
- struct dentry *dentry;
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- struct dentry *dentry_sharedprio;
-#endif
- struct dentry *entry;
- struct dentry *enable_root;
- int i = 0;
- struct hist_data *my_hist;
- char name[64];
- char *cpufmt = "CPU%d";
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- char *cpufmt_maxlatproc = "max_latency-CPU%d";
- struct maxlatproc_data *mp = NULL;
-#endif
-
- dentry = tracing_init_dentry();
- latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
- enable_root = debugfs_create_dir("enable", latency_hist_root);
-
-#ifdef CONFIG_INTERRUPT_OFF_HIST
- dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(irqsoff_hist, i), &latency_hist_fops);
- my_hist = &per_cpu(irqsoff_hist, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
- }
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
-#endif
-
-#ifdef CONFIG_PREEMPT_OFF_HIST
- dentry = debugfs_create_dir(preemptoff_hist_dir,
- latency_hist_root);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(preemptoff_hist, i), &latency_hist_fops);
- my_hist = &per_cpu(preemptoff_hist, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
- }
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
-#endif
-
-#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
- latency_hist_root);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
- my_hist = &per_cpu(preemptirqsoff_hist, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
- }
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
-#endif
-
-#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
- entry = debugfs_create_file("preemptirqsoff", 0644,
- enable_root, (void *)&preemptirqsoff_enabled_data,
- &enable_fops);
-#endif
-
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- dentry = debugfs_create_dir(wakeup_latency_hist_dir,
- latency_hist_root);
- dentry_sharedprio = debugfs_create_dir(
- wakeup_latency_hist_dir_sharedprio, dentry);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
-
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(wakeup_latency_hist, i),
- &latency_hist_fops);
- my_hist = &per_cpu(wakeup_latency_hist, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
-
- entry = debugfs_create_file(name, 0444, dentry_sharedprio,
- &per_cpu(wakeup_latency_hist_sharedprio, i),
- &latency_hist_fops);
- my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
-
- sprintf(name, cpufmt_maxlatproc, i);
-
- mp = &per_cpu(wakeup_maxlatproc, i);
- entry = debugfs_create_file(name, 0444, dentry, mp,
- &maxlatproc_fops);
- clear_maxlatprocdata(mp);
-
- mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
- entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
- &maxlatproc_fops);
- clear_maxlatprocdata(mp);
- }
- entry = debugfs_create_file("pid", 0644, dentry,
- (void *)&wakeup_pid, &pid_fops);
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
- entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
- (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
- entry = debugfs_create_file("wakeup", 0644,
- enable_root, (void *)&wakeup_latency_enabled_data,
- &enable_fops);
-#endif
-
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- dentry = debugfs_create_dir(missed_timer_offsets_dir,
- latency_hist_root);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
- my_hist = &per_cpu(missed_timer_offsets, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
-
- sprintf(name, cpufmt_maxlatproc, i);
- mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
- entry = debugfs_create_file(name, 0444, dentry, mp,
- &maxlatproc_fops);
- clear_maxlatprocdata(mp);
- }
- entry = debugfs_create_file("pid", 0644, dentry,
- (void *)&missed_timer_offsets_pid, &pid_fops);
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
- entry = debugfs_create_file("missed_timer_offsets", 0644,
- enable_root, (void *)&missed_timer_offsets_enabled_data,
- &enable_fops);
-#endif
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
- latency_hist_root);
- for_each_possible_cpu(i) {
- sprintf(name, cpufmt, i);
- entry = debugfs_create_file(name, 0444, dentry,
- &per_cpu(timerandwakeup_latency_hist, i),
- &latency_hist_fops);
- my_hist = &per_cpu(timerandwakeup_latency_hist, i);
- atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = LONG_MAX;
-
- sprintf(name, cpufmt_maxlatproc, i);
- mp = &per_cpu(timerandwakeup_maxlatproc, i);
- entry = debugfs_create_file(name, 0444, dentry, mp,
- &maxlatproc_fops);
- clear_maxlatprocdata(mp);
- }
- entry = debugfs_create_file("reset", 0644, dentry,
- (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
- entry = debugfs_create_file("timerandwakeup", 0644,
- enable_root, (void *)&timerandwakeup_enabled_data,
- &enable_fops);
-#endif
- return 0;
-}
-
-device_initcall(latency_hist_init);
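
After this initcall runs, every histogram is an ordinary text file under debugfs. A minimal reader, assuming debugfs is mounted at /sys/kernel/debug and that latency_hist_dir_root (defined earlier, not shown here) names the directory "latency_hist":

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/latency_hist/wakeup/CPU0";
	char line[256];
	FILE *f = fopen(path, "r");	/* typically requires root */

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* "#usecs  samples" rows */
	fclose(f);
	return 0;
}
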
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f9401ed..138077b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -442,7 +442,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
local_save_flags(irq_flags);
buffer = global_trace.trace_buffer.buffer;
- event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
irq_flags, preempt_count());
if (!event)
return 0;
@@ -1509,7 +1509,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
- entry->preempt_lazy_count = preempt_lazy_count();
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
@@ -1519,10 +1518,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
#endif
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
- (need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
- (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0);
-
- entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
+ (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
@@ -2412,17 +2408,14 @@ get_total_entries(struct trace_buffer *buf,
static void print_lat_help_header(struct seq_file *m)
{
- seq_puts(m, "# _--------=> CPU# \n");
- seq_puts(m, "# / _-------=> irqs-off \n");
- seq_puts(m, "# | / _------=> need-resched \n");
- seq_puts(m, "# || / _-----=> need-resched_lazy \n");
- seq_puts(m, "# ||| / _----=> hardirq/softirq \n");
- seq_puts(m, "# |||| / _---=> preempt-depth \n");
- seq_puts(m, "# ||||| / _--=> preempt-lazy-depth\n");
- seq_puts(m, "# |||||| / _-=> migrate-disable \n");
- seq_puts(m, "# ||||||| / delay \n");
- seq_puts(m, "# cmd pid |||||||| time | caller \n");
- seq_puts(m, "# \\ / |||||||| \\ | / \n");
+ seq_puts(m, "# _------=> CPU# \n");
+ seq_puts(m, "# / _-----=> irqs-off \n");
+ seq_puts(m, "# | / _----=> need-resched \n");
+ seq_puts(m, "# || / _---=> hardirq/softirq \n");
+ seq_puts(m, "# ||| / _--=> preempt-depth \n");
+ seq_puts(m, "# |||| / delay \n");
+ seq_puts(m, "# cmd pid ||||| time | caller \n");
+ seq_puts(m, "# \\ / ||||| \\ | / \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
@@ -2446,16 +2439,13 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
print_event_info(buf, m);
- seq_puts(m, "# _-------=> irqs-off \n");
- seq_puts(m, "# / _------=> need-resched \n");
- seq_puts(m, "# |/ _-----=> need-resched_lazy \n");
- seq_puts(m, "# ||/ _----=> hardirq/softirq \n");
- seq_puts(m, "# |||/ _---=> preempt-depth \n");
- seq_puts(m, "# ||||/ _--=> preempt-lazy-depth\n");
- seq_puts(m, "# ||||| / _-=> migrate-disable \n");
- seq_puts(m, "# |||||| / delay\n");
- seq_puts(m, "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n");
- seq_puts(m, "# | | | |||||| | |\n");
+ seq_puts(m, "# _-----=> irqs-off\n");
+ seq_puts(m, "# / _----=> need-resched\n");
+ seq_puts(m, "# | / _---=> hardirq/softirq\n");
+ seq_puts(m, "# || / _--=> preempt-depth\n");
+ seq_puts(m, "# ||| / delay\n");
+ seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
+ seq_puts(m, "# | | | |||| | |\n");
}
void
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 109291a..10c86fb 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -117,7 +117,6 @@ struct kretprobe_trace_entry_head {
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
* SOFTIRQ - inside a softirq handler
- * NEED_RESCHED_LAZY - lazy reschedule is requested
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
@@ -125,7 +124,6 @@ enum trace_flag_type {
TRACE_FLAG_NEED_RESCHED = 0x04,
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
- TRACE_FLAG_NEED_RESCHED_LAZY = 0x20,
};
#define TRACE_BUF_SIZE 1024
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7531ded..bc1bd20 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -27,12 +27,6 @@
DEFINE_MUTEX(event_mutex);
-DEFINE_MUTEX(event_storage_mutex);
-EXPORT_SYMBOL_GPL(event_storage_mutex);
-
-char event_storage[EVENT_STORAGE_SIZE];
-EXPORT_SYMBOL_GPL(event_storage);
-
LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);
@@ -166,8 +160,6 @@ static int trace_define_common_fields(void)
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
- __common_field(unsigned short, migrate_disable);
- __common_field(unsigned short, padding);
return ret;
}
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index d21a746..d7d0b50 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \
#undef __array
#define __array(type, item, len) \
do { \
+ char *type_str = #type"["__stringify(len)"]"; \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
- mutex_lock(&event_storage_mutex); \
- snprintf(event_storage, sizeof(event_storage), \
- "%s[%d]", #type, len); \
- ret = trace_define_field(event_call, event_storage, #item, \
+ ret = trace_define_field(event_call, type_str, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
is_signed_type(type), filter_type); \
- mutex_unlock(&event_storage_mutex); \
if (ret) \
return ret; \
} while (0);
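
The replacement hunk above builds the "type[len]" string with the preprocessor instead of snprintf()-ing into the now-deleted shared event_storage buffer, which is what lets the mutex go away. The two-level stringify indirection it relies on, shown standalone:

#include <stdio.h>

#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)

#define TYPE_STR(type, len) #type "[" __stringify(len) "]"

#define NAME_LEN 16

int main(void)
{
	/* len may be a macro; the indirection expands it before # applies */
	printf("%s\n", TYPE_STR(char, NAME_LEN));	/* prints char[16] */
	return 0;
}
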
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2f4eb37..2aefbee 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -17,7 +17,6 @@
#include <linux/fs.h>
#include "trace.h"
-#include <trace/events/hist.h>
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;
@@ -440,13 +439,11 @@ void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- trace_preemptirqsoff_hist(TRACE_START, 1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
void stop_critical_timings(void)
{
- trace_preemptirqsoff_hist(TRACE_STOP, 0);
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
@@ -456,7 +453,6 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
- trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
@@ -465,7 +461,6 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
- trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
#else /* !CONFIG_PROVE_LOCKING */
@@ -491,7 +486,6 @@ inline void print_irqtrace_events(struct task_struct *curr)
*/
void trace_hardirqs_on(void)
{
- trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
@@ -501,13 +495,11 @@ void trace_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off);
void trace_hardirqs_on_caller(unsigned long caller_addr)
{
- trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
@@ -517,7 +509,6 @@ void trace_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
- trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
@@ -527,14 +518,12 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
- trace_preemptirqsoff_hist(PREEMPT_ON, 0);
if (preempt_trace() && !irq_trace())
stop_critical_timing(a0, a1);
}
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
- trace_preemptirqsoff_hist(PREEMPT_ON, 1);
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 46b6467..34e7cba 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -606,7 +606,6 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
char hardsoft_irq;
char need_resched;
- char need_resched_lazy;
char irqs_off;
int hardirq;
int softirq;
@@ -621,17 +620,14 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
'.';
need_resched =
(entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
- need_resched_lazy =
- (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
hardsoft_irq =
(hardirq && softirq) ? 'H' :
hardirq ? 'h' :
softirq ? 's' :
'.';
- if (!trace_seq_printf(s, "%c%c%c%c",
- irqs_off, need_resched, need_resched_lazy,
- hardsoft_irq))
+ if (!trace_seq_printf(s, "%c%c%c",
+ irqs_off, need_resched, hardsoft_irq))
return 0;
if (entry->preempt_count)
@@ -639,16 +635,6 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
else
ret = trace_seq_putc(s, '.');
- if (entry->preempt_lazy_count)
- ret = trace_seq_printf(s, "%x", entry->preempt_lazy_count);
- else
- ret = trace_seq_putc(s, '.');
-
- if (entry->migrate_disable)
- ret = trace_seq_printf(s, "%x", entry->migrate_disable);
- else
- ret = trace_seq_putc(s, '.');
-
return ret;
}
diff --git a/kernel/user.c b/kernel/user.c
index 2800008..5bbb919 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -154,11 +154,11 @@ void free_uid(struct user_struct *up)
if (!up)
return;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
free_user(up, flags);
else
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
}
struct user_struct *alloc_uid(kuid_t uid)
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 13fb113..6991139 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -146,7 +146,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
/* Find the matching extent */
extents = map->nr_extents;
- smp_read_barrier_depends();
+ smp_rmb();
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].first;
last = first + map->extent[idx].count - 1;
@@ -170,7 +170,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id)
/* Find the matching extent */
extents = map->nr_extents;
- smp_read_barrier_depends();
+ smp_rmb();
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].first;
last = first + map->extent[idx].count - 1;
@@ -193,7 +193,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
/* Find the matching extent */
extents = map->nr_extents;
- smp_read_barrier_depends();
+ smp_rmb();
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].lower_first;
last = first + map->extent[idx].count - 1;
@@ -609,9 +609,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
* were written before the count of the extents.
*
* To achieve this, smp_wmb() is used to guarantee the write
- * order and smp_read_barrier_depends() is guaranteed that we
- * don't have crazy architectures returning stale data.
- *
+ * order and smp_rmb() guarantees that we don't have crazy
+ * architectures returning stale data.
*/
mutex_lock(&id_map_mutex);
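
The pairing the comment describes, rendered with C11 fences: the writer fills an extent slot, issues the write barrier, then publishes the new count; the reader loads the count, issues the read barrier, and may then safely read that many slots. Single-writer sketch, illustrative only:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_EXTENTS 5

static int extent[MAX_EXTENTS];
static atomic_int nr_extents;

static void writer_add(int value)
{
	int n = atomic_load_explicit(&nr_extents, memory_order_relaxed);

	extent[n] = value;				/* fill the slot first */
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_store_explicit(&nr_extents, n + 1, memory_order_relaxed);
}

static void reader_scan(void)
{
	int n = atomic_load_explicit(&nr_extents, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
	for (int i = 0; i < n; i++)		/* slots 0..n-1 are stable */
		printf("extent[%d] = %d\n", i, extent[i]);
}

int main(void)
{
	writer_add(100);
	writer_add(200);
	reader_scan();
	return 0;
}
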
diff --git a/kernel/wait-simple.c b/kernel/wait-simple.c
deleted file mode 100644
index 7dfa86d..0000000
--- a/kernel/wait-simple.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Simple waitqueues without fancy flags and callbacks
- *
- * (C) 2011 Thomas Gleixner <tglx@linutronix.de>
- *
- * Based on kernel/wait.c
- *
- * For licencing details see kernel-base/COPYING
- */
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/wait-simple.h>
-
-/* Adds w to head->list. Must be called with head->lock locked. */
-static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
-{
- list_add(&w->node, &head->list);
- /* We can't let the condition leak before the setting of head */
- smp_mb();
-}
-
-/* Removes w from head->list. Must be called with head->lock locked. */
-static inline void __swait_dequeue(struct swaiter *w)
-{
- list_del_init(&w->node);
-}
-
-void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
-{
- raw_spin_lock_init(&head->lock);
- lockdep_set_class(&head->lock, key);
- INIT_LIST_HEAD(&head->list);
-}
-EXPORT_SYMBOL(__init_swait_head);
-
-void swait_prepare_locked(struct swait_head *head, struct swaiter *w)
-{
- w->task = current;
- if (list_empty(&w->node))
- __swait_enqueue(head, w);
-}
-
-void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&head->lock, flags);
- swait_prepare_locked(head, w);
- __set_current_state(state);
- raw_spin_unlock_irqrestore(&head->lock, flags);
-}
-EXPORT_SYMBOL(swait_prepare);
-
-void swait_finish_locked(struct swait_head *head, struct swaiter *w)
-{
- __set_current_state(TASK_RUNNING);
- if (w->task)
- __swait_dequeue(w);
-}
-
-void swait_finish(struct swait_head *head, struct swaiter *w)
-{
- unsigned long flags;
-
- __set_current_state(TASK_RUNNING);
- if (w->task) {
- raw_spin_lock_irqsave(&head->lock, flags);
- __swait_dequeue(w);
- raw_spin_unlock_irqrestore(&head->lock, flags);
- }
-}
-EXPORT_SYMBOL(swait_finish);
-
-unsigned int
-__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num)
-{
- struct swaiter *curr, *next;
- int woken = 0;
-
- list_for_each_entry_safe(curr, next, &head->list, node) {
- if (wake_up_state(curr->task, state)) {
- __swait_dequeue(curr);
- /*
- * The waiting task can free the waiter as
- * soon as curr->task = NULL is written,
- * without taking any locks. A memory barrier
- * is required here to prevent the following
- * store to curr->task from getting ahead of
- * the dequeue operation.
- */
- smp_wmb();
- curr->task = NULL;
- if (++woken == num)
- break;
- }
- }
- return woken;
-}
-
-unsigned int
-__swait_wake(struct swait_head *head, unsigned int state, unsigned int num)
-{
- unsigned long flags;
- int woken;
-
- if (!swaitqueue_active(head))
- return 0;
-
- raw_spin_lock_irqsave(&head->lock, flags);
- woken = __swait_wake_locked(head, state, num);
- raw_spin_unlock_irqrestore(&head->lock, flags);
- return woken;
-}
-EXPORT_SYMBOL(__swait_wake);
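
The subtle part of the deleted __swait_wake_locked() is the handoff at the end: the dequeue must be visible before curr->task is cleared, because the waiter is allowed to free its swaiter the moment it observes task == NULL. A userspace rendering of that release/acquire handoff (compile with -pthread; names illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct swaiter_demo {
	_Atomic(void *) task;	/* non-NULL while enqueued */
};

static struct swaiter_demo *slot;	/* stands in for head->list */

static void *waker(void *arg)
{
	struct swaiter_demo *w = slot;

	(void)arg;
	slot = NULL;		/* the __swait_dequeue() step */
	/* release pairs with the waiter's acquire: dequeue is visible first */
	atomic_store_explicit(&w->task, NULL, memory_order_release);
	return NULL;		/* waker must not touch w after this store */
}

int main(void)
{
	struct swaiter_demo *w = malloc(sizeof(*w));
	pthread_t t;

	if (!w)
		return 1;
	atomic_store(&w->task, (void *)1);	/* any non-NULL marker */
	slot = w;

	pthread_create(&t, NULL, waker, NULL);
	while (atomic_load_explicit(&w->task, memory_order_acquire))
		;		/* spin until the waker clears task */
	free(w);		/* safe: ownership came back with the NULL store */
	pthread_join(t, NULL);
	return 0;
}
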
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 870b748..4431610 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -205,8 +205,6 @@ static int is_softlockup(unsigned long touch_ts)
#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
-
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
@@ -241,19 +239,10 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
- /*
- * If early-printk is enabled then make sure we do not
- * lock up in printk() and kill console logging:
- */
- printk_kill();
-
- if (hardlockup_panic) {
+ if (hardlockup_panic)
panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
- } else {
- raw_spin_lock(&watchdog_output_lock);
+ else
WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
- raw_spin_unlock(&watchdog_output_lock);
- }
__this_cpu_write(hard_watchdog_warn, true);
return;
@@ -357,7 +346,6 @@ static void watchdog_enable(unsigned int cpu)
/* kick off the timer for the hardlockup detector */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
- hrtimer->irqsafe = 1;
/* Enable the perf event */
watchdog_nmi_enable(cpu);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9efb7ce..60fee69 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,8 +48,6 @@
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
-#include <linux/locallock.h>
-#include <linux/delay.h>
#include "workqueue_internal.h"
@@ -131,11 +129,11 @@ enum {
*
* PL: wq_pool_mutex protected.
*
- * PR: wq_pool_mutex protected for writes. RCU protected for reads.
+ * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
*
* WQ: wq->mutex protected.
*
- * WR: wq->mutex protected for writes. RCU protected for reads.
+ * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
*
* MD: wq_mayday_lock protected.
*/
@@ -180,7 +178,7 @@ struct worker_pool {
atomic_t nr_running ____cacheline_aligned_in_smp;
/*
- * Destruction of pool is RCU protected to allow dereferences
+ * Destruction of pool is sched-RCU protected to allow dereferences
* from get_work_pool().
*/
struct rcu_head rcu;
@@ -209,7 +207,7 @@ struct pool_workqueue {
/*
* Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue
- * itself is also RCU protected so that the first pwq can be
+ * itself is also sched-RCU protected so that the first pwq can be
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
@@ -325,8 +323,6 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
-static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
-
static int worker_thread(void *__worker);
static void copy_workqueue_attrs(struct workqueue_attrs *to,
const struct workqueue_attrs *from);
@@ -335,14 +331,14 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
- rcu_lockdep_assert(rcu_read_lock_held() || \
+ rcu_lockdep_assert(rcu_read_lock_sched_held() || \
lockdep_is_held(&wq_pool_mutex), \
- "RCU or wq_pool_mutex should be held")
+ "sched RCU or wq_pool_mutex should be held")
#define assert_rcu_or_wq_mutex(wq) \
- rcu_lockdep_assert(rcu_read_lock_held() || \
+ rcu_lockdep_assert(rcu_read_lock_sched_held() || \
lockdep_is_held(&wq->mutex), \
- "RCU or wq->mutex should be held")
+ "sched RCU or wq->mutex should be held")
#ifdef CONFIG_LOCKDEP
#define assert_manager_or_pool_lock(pool) \
@@ -364,7 +360,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
* @pool: iteration cursor
* @pi: integer used for iteration
*
- * This must be called either with wq_pool_mutex held or RCU read
+ * This must be called either with wq_pool_mutex held or sched RCU read
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
@@ -397,7 +393,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
* @pwq: iteration cursor
* @wq: the target workqueue
*
- * This must be called either with wq->mutex held or RCU read locked.
+ * This must be called either with wq->mutex held or sched RCU read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
@@ -545,7 +541,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
* @wq: the target workqueue
* @node: the node ID
*
- * This must be called either with pwq_lock held or RCU read locked.
+ * This must be called either with pwq_lock held or sched RCU read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
@@ -649,8 +645,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allow read
- * access under RCU read lock. As such, this function should be
- * called under wq_pool_mutex or inside of a rcu_read_lock() region.
+ * access under sched-RCU read lock. As such, this function should be
+ * called under wq_pool_mutex or with preemption disabled.
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
@@ -808,31 +804,44 @@ static void wake_up_worker(struct worker_pool *pool)
}
/**
- * wq_worker_running - a worker is running again
- * @task: task returning from sleep
+ * wq_worker_waking_up - a worker is waking up
+ * @task: task waking up
+ * @cpu: CPU @task is waking up to
+ *
+ * This function is called during try_to_wake_up() when a worker is
+ * being awoken.
*
- * This function is called when a worker returns from schedule()
+ * CONTEXT:
+ * spin_lock_irq(rq->lock)
*/
-void wq_worker_running(struct task_struct *task)
+void wq_worker_waking_up(struct task_struct *task, int cpu)
{
struct worker *worker = kthread_data(task);
- if (!worker->sleeping)
- return;
- if (!(worker->flags & WORKER_NOT_RUNNING))
+ if (!(worker->flags & WORKER_NOT_RUNNING)) {
+ WARN_ON_ONCE(worker->pool->cpu != cpu);
atomic_inc(&worker->pool->nr_running);
- worker->sleeping = 0;
+ }
}
/**
* wq_worker_sleeping - a worker is going to sleep
* @task: task going to sleep
- * This function is called from schedule() when a busy worker is
- * going to sleep.
+ * @cpu: CPU in question, must be the current CPU number
+ *
+ * This function is called during schedule() when a busy worker is
+ * going to sleep. A worker on the same cpu can be woken up by
+ * returning a pointer to its task.
+ *
+ * CONTEXT:
+ * spin_lock_irq(rq->lock)
+ *
+ * Return:
+ * Worker task on @cpu to wake up, %NULL if none.
*/
-void wq_worker_sleeping(struct task_struct *task)
+struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
{
- struct worker *next, *worker = kthread_data(task);
+ struct worker *worker = kthread_data(task), *to_wakeup = NULL;
struct worker_pool *pool;
/*
@@ -841,15 +850,14 @@ void wq_worker_sleeping(struct task_struct *task)
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
- return;
+ return NULL;
pool = worker->pool;
- if (WARN_ON_ONCE(worker->sleeping))
- return;
+ /* this can only happen on the local cpu */
+ if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
+ return NULL;
- worker->sleeping = 1;
- spin_lock_irq(&pool->lock);
/*
* The counterpart of the following dec_and_test, implied mb,
* worklist not empty test sequence is in insert_work().
@@ -862,12 +870,9 @@ void wq_worker_sleeping(struct task_struct *task)
* lock is safe.
*/
if (atomic_dec_and_test(&pool->nr_running) &&
- !list_empty(&pool->worklist)) {
- next = first_worker(pool);
- if (next)
- wake_up_process(next->task);
- }
- spin_unlock_irq(&pool->lock);
+ !list_empty(&pool->worklist))
+ to_wakeup = first_worker(pool);
+ return to_wakeup ? to_wakeup->task : NULL;
}
/**
@@ -1074,12 +1079,12 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
if (pwq) {
/*
- * As both pwqs and pools are RCU protected, the
+ * As both pwqs and pools are sched-RCU protected, the
* following lock operations are safe.
*/
- local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
+ spin_lock_irq(&pwq->pool->lock);
put_pwq(pwq);
- local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
+ spin_unlock_irq(&pwq->pool->lock);
}
}
@@ -1181,7 +1186,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
struct worker_pool *pool;
struct pool_workqueue *pwq;
- local_lock_irqsave(pendingb_lock, *flags);
+ local_irq_save(*flags);
/* try to steal the timer if it exists */
if (is_dwork) {
@@ -1200,7 +1205,6 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
- rcu_read_lock();
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
@@ -1239,16 +1243,14 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
- rcu_read_unlock();
return 1;
}
spin_unlock(&pool->lock);
fail:
- rcu_read_unlock();
- local_unlock_irqrestore(pendingb_lock, *flags);
+ local_irq_restore(*flags);
if (work_is_canceling(work))
return -ENOENT;
- cpu_chill();
+ cpu_relax();
return -EAGAIN;
}
@@ -1317,7 +1319,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
- WARN_ON_ONCE_NONRT(!irqs_disabled());
+ WARN_ON_ONCE(!irqs_disabled());
debug_work_activate(work);
@@ -1325,8 +1327,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
-
- rcu_read_lock();
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = raw_smp_processor_id();
@@ -1383,8 +1383,10 @@ retry:
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
- if (WARN_ON(!list_empty(&work->entry)))
- goto out;
+ if (WARN_ON(!list_empty(&work->entry))) {
+ spin_unlock(&pwq->pool->lock);
+ return;
+ }
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
@@ -1400,9 +1402,7 @@ retry:
insert_work(pwq, work, worklist, work_flags);
-out:
spin_unlock(&pwq->pool->lock);
- rcu_read_unlock();
}
/**
@@ -1422,14 +1422,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
bool ret = false;
unsigned long flags;
- local_lock_irqsave(pendingb_lock,flags);
+ local_irq_save(flags);
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
__queue_work(cpu, wq, work);
ret = true;
}
- local_unlock_irqrestore(pendingb_lock, flags);
+ local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL(queue_work_on);
@@ -1496,14 +1496,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
unsigned long flags;
/* read the comment in __queue_work() */
- local_lock_irqsave(pendingb_lock, flags);
+ local_irq_save(flags);
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
__queue_delayed_work(cpu, wq, dwork, delay);
ret = true;
}
- local_unlock_irqrestore(pendingb_lock, flags);
+ local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
@@ -1538,7 +1538,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
- local_unlock_irqrestore(pendingb_lock, flags);
+ local_irq_restore(flags);
}
/* -ENOENT from try_to_grab_pending() becomes %true */
@@ -2809,14 +2809,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
might_sleep();
- rcu_read_lock();
+ local_irq_disable();
pool = get_work_pool(work);
if (!pool) {
- rcu_read_unlock();
+ local_irq_enable();
return false;
}
- spin_lock_irq(&pool->lock);
+ spin_lock(&pool->lock);
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
@@ -2843,11 +2843,10 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
else
lock_map_acquire_read(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
- rcu_read_unlock();
+
return true;
already_gone:
spin_unlock_irq(&pool->lock);
- rcu_read_unlock();
return false;
}
@@ -2901,7 +2900,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
- local_unlock_irqrestore(pendingb_lock, flags);
+ local_irq_restore(flags);
flush_work(work);
clear_work_data(work);
@@ -2946,10 +2945,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
- local_lock_irq(pendingb_lock);
+ local_irq_disable();
if (del_timer_sync(&dwork->timer))
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
- local_unlock_irq(pendingb_lock);
+ local_irq_enable();
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
@@ -2984,7 +2983,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
set_work_pool_and_clear_pending(&dwork->work,
get_work_pool_id(&dwork->work));
- local_unlock_irqrestore(pendingb_lock, flags);
+ local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL(cancel_delayed_work);
@@ -3170,8 +3169,7 @@ static ssize_t wq_pool_ids_show(struct device *dev,
const char *delim = "";
int node, written = 0;
- get_online_cpus();
- rcu_read_lock();
+ rcu_read_lock_sched();
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
@@ -3179,8 +3177,7 @@ static ssize_t wq_pool_ids_show(struct device *dev,
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
- rcu_read_unlock();
- put_online_cpus();
+ rcu_read_unlock_sched();
return written;
}
@@ -3546,7 +3543,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
- * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
+ * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
@@ -3593,8 +3590,8 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
- /* RCU protected to allow dereferences from get_work_pool() */
- call_rcu(&pool->rcu, rcu_free_pool);
+ /* sched-RCU protected to allow dereferences from get_work_pool() */
+ call_rcu_sched(&pool->rcu, rcu_free_pool);
}
/**
@@ -3707,7 +3704,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
- call_rcu(&pwq->rcu, rcu_free_pwq);
+ call_rcu_sched(&pwq->rcu, rcu_free_pwq);
/*
* If we're the last pwq going away, @wq is already dead and no one
@@ -4420,8 +4417,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
struct pool_workqueue *pwq;
bool ret;
- rcu_read_lock();
- preempt_disable();
+ rcu_read_lock_sched();
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
@@ -4432,8 +4428,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
- preempt_enable();
- rcu_read_unlock();
+ rcu_read_unlock_sched();
return ret;
}
@@ -4459,15 +4454,16 @@ unsigned int work_busy(struct work_struct *work)
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
- rcu_read_lock();
+ local_irq_save(flags);
pool = get_work_pool(work);
if (pool) {
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock(&pool->lock);
if (find_worker_executing_work(pool, work))
ret |= WORK_BUSY_RUNNING;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock(&pool->lock);
}
- rcu_read_unlock();
+ local_irq_restore(flags);
+
return ret;
}
EXPORT_SYMBOL_GPL(work_busy);
@@ -4920,16 +4916,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
- rcu_read_lock();
+ rcu_read_lock_sched();
for_each_pwq(pwq, wq) {
WARN_ON_ONCE(pwq->nr_active < 0);
if (pwq->nr_active) {
busy = true;
- rcu_read_unlock();
+ rcu_read_unlock_sched();
goto out_unlock;
}
}
- rcu_read_unlock();
+ rcu_read_unlock_sched();
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 2bb5b5a..7e2204d 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -41,7 +41,6 @@ struct worker {
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
int id; /* I: worker id */
- int sleeping; /* None */
/*
* Opaque string set with work_set_desc(). Printed out with task
@@ -67,7 +66,7 @@ static inline struct worker *current_wq_worker(void)
* Scheduler hooks for concurrency managed workqueue. Only to be used from
* sched/core.c and workqueue.c.
*/
-void wq_worker_running(struct task_struct *task);
-void wq_worker_sleeping(struct task_struct *task);
+void wq_worker_waking_up(struct task_struct *task, int cpu);
+struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index eee9e09..b3c8be0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -57,7 +57,6 @@ config CMPXCHG_LOCKREF
depends on !GENERIC_LOCKBREAK
depends on !DEBUG_SPINLOCK
depends on !DEBUG_LOCK_ALLOC
- depends on !PREEMPT_RT_BASE
config CRC_CCITT
tristate "CRC-CCITT functions"
@@ -344,7 +343,6 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
- depends on !PREEMPT_RT_FULL
help
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index bde5dd2..094f315 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -597,7 +597,7 @@ endmenu # "Memory Debugging"
config DEBUG_SHIRQ
bool "Debug shared IRQ handlers"
- depends on DEBUG_KERNEL && !PREEMPT_RT_BASE
+ depends on DEBUG_KERNEL
help
Enable this to generate a spurious interrupt as soon as a shared
interrupt handler is registered, and just before one is deregistered.
diff --git a/lib/Makefile b/lib/Makefile
index 4461595..6af6fbb 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -43,11 +43,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
-
-ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-endif
lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
GCOV_PROFILE_hweight.o := n
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 1ac2049..bf2c8b1 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -308,10 +308,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (preempt_count() == 0 && !irqs_disabled())
-#endif
- fill_pool();
+ fill_pool();
db = get_bucket((unsigned long) addr);
diff --git a/lib/idr.c b/lib/idr.c
index ae69d32..bfe4db4 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -37,7 +37,6 @@
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
-#include <linux/locallock.h>
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
@@ -390,36 +389,6 @@ int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
}
EXPORT_SYMBOL(__idr_get_new_above);
-#ifdef CONFIG_PREEMPT_RT_FULL
-static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
-
-static inline void idr_preload_lock(void)
-{
- local_lock(idr_lock);
-}
-
-static inline void idr_preload_unlock(void)
-{
- local_unlock(idr_lock);
-}
-
-void idr_preload_end(void)
-{
- idr_preload_unlock();
-}
-EXPORT_SYMBOL(idr_preload_end);
-#else
-static inline void idr_preload_lock(void)
-{
- preempt_disable();
-}
-
-static inline void idr_preload_unlock(void)
-{
- preempt_enable();
-}
-#endif
-
/**
* idr_preload - preload for idr_alloc()
* @gfp_mask: allocation mask to use for preloading
@@ -454,7 +423,7 @@ void idr_preload(gfp_t gfp_mask)
WARN_ON_ONCE(in_interrupt());
might_sleep_if(gfp_mask & __GFP_WAIT);
- idr_preload_lock();
+ preempt_disable();
/*
* idr_alloc() is likely to succeed w/o full idr_layer buffer and
@@ -466,9 +435,9 @@ void idr_preload(gfp_t gfp_mask)
while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
struct idr_layer *new;
- idr_preload_unlock();
+ preempt_enable();
new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
- idr_preload_lock();
+ preempt_disable();
if (!new)
break;
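The preload loop above shows a common kernel shape: the per-CPU cache may only be touched with the fast-path lock-out held (plain preempt_disable() again, after this revert), but the allocation itself may sleep, so the lock-out is dropped around each kmem_cache_zalloc() and re-taken before the cache is refilled. A minimal user-space sketch of the same shape, with a mutex standing in for preempt_disable() and malloc() for the slab allocator (all names illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_FREE 8

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pool[MAX_FREE];
static int pool_cnt;

/* Refill the cache; the lock is dropped around the sleepable allocation. */
static void pool_preload(void)
{
	pthread_mutex_lock(&pool_lock);
	while (pool_cnt < MAX_FREE) {
		void *new;

		pthread_mutex_unlock(&pool_lock);	/* allocation may sleep */
		new = malloc(64);
		pthread_mutex_lock(&pool_lock);
		if (!new)
			break;
		/* Unlike the per-CPU original, re-check the fill level:
		 * another thread may have refilled it in the window. */
		if (pool_cnt < MAX_FREE)
			pool[pool_cnt++] = new;
		else
			free(new);
	}
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	pool_preload();
	printf("preloaded %d objects\n", pool_cnt);
	return 0;
}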
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 0acf354..6dc09d8 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -1858,7 +1858,6 @@ void locking_selftest(void)
printk(" --------------------------------------------------------------------------\n");
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* irq-context testcases:
*/
@@ -1871,28 +1870,6 @@ void locking_selftest(void)
DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
-#else
- /* On -rt, we only do hardirq context test for raw spinlock */
- DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
- DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
-
- DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
- DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
-
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
-
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
- DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
-#endif
ww_tests();
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 18eca78..fc67547 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -303,9 +303,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
*/
int nla_strcmp(const struct nlattr *nla, const char *str)
{
- int len = strlen(str) + 1;
- int d = nla_len(nla) - len;
+ int len = strlen(str);
+ char *buf = nla_data(nla);
+ int attrlen = nla_len(nla);
+ int d;
+ if (attrlen > 0 && buf[attrlen - 1] == '\0')
+ attrlen--;
+
+ d = attrlen - len;
if (d == 0)
d = memcmp(nla_data(nla), str, len);
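The rewritten nla_strcmp() above tolerates netlink attributes both with and without a trailing NUL: one terminating NUL, if present, is trimmed from the attribute length before the length-then-bytes comparison. The same logic as a self-contained sketch (attr_strcmp() and the sample buffers are illustrative):

#include <stdio.h>
#include <string.h>

/* Compare a length-delimited attribute payload, which may or may not
 * carry a trailing NUL, against a C string. Returns 0 on a match. */
static int attr_strcmp(const char *buf, int attrlen, const char *str)
{
	int len = strlen(str);
	int d;

	if (attrlen > 0 && buf[attrlen - 1] == '\0')
		attrlen--;		/* ignore one terminating NUL */

	d = attrlen - len;
	if (d == 0)
		d = memcmp(buf, str, len);
	return d;
}

int main(void)
{
	printf("%d\n", attr_strcmp("eth0", 4, "eth0"));	/* 0: no NUL */
	printf("%d\n", attr_strcmp("eth0\0", 5, "eth0"));	/* 0: with NUL */
	printf("%d\n", attr_strcmp("eth00", 5, "eth0"));	/* >0: longer */
	return 0;
}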
diff --git a/lib/percpu-rwsem.c b/lib/percpu-rwsem.c
index 2db0f42..652a8ee 100644
--- a/lib/percpu-rwsem.c
+++ b/lib/percpu-rwsem.c
@@ -84,12 +84,8 @@ void percpu_down_read(struct percpu_rw_semaphore *brw)
down_read(&brw->rw_sem);
atomic_inc(&brw->slow_read_ctr);
-#ifdef CONFIG_PREEMPT_RT_FULL
- up_read(&brw->rw_sem);
-#else
/* avoid up_read()->rwsem_release() */
__up_read(&brw->rw_sem);
-#endif
}
void percpu_up_read(struct percpu_rw_semaphore *brw)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e7b61e8..7811ed3 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -221,13 +221,12 @@ radix_tree_node_alloc(struct radix_tree_root *root)
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
- rtp = &get_cpu_var(radix_tree_preloads);
+ rtp = &__get_cpu_var(radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes[rtp->nr - 1];
rtp->nodes[rtp->nr - 1] = NULL;
rtp->nr--;
}
- put_cpu_var(radix_tree_preloads);
}
if (ret == NULL)
ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
@@ -262,7 +261,6 @@ radix_tree_node_free(struct radix_tree_node *node)
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
@@ -328,7 +326,6 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
-#endif
/*
* Return the maximum key which can be stored into a
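For context, radix_tree_node_alloc() pops a node from the per-CPU preload stash when one is available and only falls back to kmem_cache_alloc() otherwise; the hunk above merely drops the get_cpu_var()/put_cpu_var() pair because the caller already runs with preemption disabled. A user-space sketch of the pop-with-fallback shape (the preload layout is illustrative):

#include <stdio.h>
#include <stdlib.h>

#define PRELOAD_SIZE 4

struct preload {
	int nr;
	void *nodes[PRELOAD_SIZE];
};

/* Pop a preloaded object if available, else fall back to the allocator. */
static void *node_alloc(struct preload *rtp)
{
	void *ret = NULL;

	if (rtp->nr) {
		ret = rtp->nodes[rtp->nr - 1];
		rtp->nodes[rtp->nr - 1] = NULL;
		rtp->nr--;
	}
	if (ret == NULL)
		ret = malloc(64);	/* fallback path; may sleep or fail */
	return ret;
}

int main(void)
{
	struct preload rtp = { .nr = 1, .nodes = { malloc(64) } };
	void *a = node_alloc(&rtp);	/* served from the stash */
	void *b = node_alloc(&rtp);	/* served by the fallback allocator */

	printf("a=%p b=%p left=%d\n", a, b, rtp.nr);
	free(a);
	free(b);
	return 0;
}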
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 9c1236e..d16fa29 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -582,7 +582,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
- WARN_ON_ONCE(!pagefault_disabled());
+ WARN_ON_ONCE(preemptible());
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
@@ -627,7 +627,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
if (!sg_miter_skip(&miter, skip))
return false;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
while (sg_miter_next(&miter) && offset < buflen) {
unsigned int len;
@@ -644,7 +644,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
sg_miter_stop(&miter);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
return offset;
}
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index dbb1570..4c0d0e5 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
if (!printk_ratelimit())
goto out_enable;
- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
- "code: %s/%d\n", preempt_count() - 1,
- __migrate_disabled(current), current->comm, current->pid);
+ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
+ "code: %s/%d\n",
+ preempt_count() - 1, current->comm, current->pid);
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9497033..0374a59 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -31,7 +31,6 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
EXPORT_SYMBOL(__raw_spin_lock_init);
-#ifndef CONFIG_PREEMPT_RT_FULL
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
{
@@ -49,7 +48,6 @@ void __rwlock_init(rwlock_t *lock, const char *name,
}
EXPORT_SYMBOL(__rwlock_init);
-#endif
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
@@ -161,7 +159,6 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
arch_spin_unlock(&lock->raw_lock);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
@@ -303,5 +300,3 @@ void do_raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock);
}
-
-#endif
diff --git a/localversion-rt b/localversion-rt
deleted file mode 100644
index c5b71f9..0000000
--- a/localversion-rt
+++ /dev/null
@@ -1 +0,0 @@
--rt25
diff --git a/mm/Kconfig b/mm/Kconfig
index 083685a..394838f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -384,7 +384,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
config TRANSPARENT_HUGEPAGE
bool "Transparent Hugepage Support"
- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
+ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
select COMPACTION
help
Transparent Hugepages allows the kernel to use huge pages and
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index ce682f7..09d9591 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -288,13 +288,19 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
* Note, we wouldn't bother setting up the timer, but this function is on the
* fast-path (used by '__mark_inode_dirty()'), so we save a few context switches
* by delaying the wake-up.
+ *
+ * We have to be careful not to postpone flush work if it is scheduled for
+ * earlier. Thus we use queue_delayed_work().
*/
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
unsigned long timeout;
timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
- mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
+ spin_lock_bh(&bdi->wb_lock);
+ if (test_bit(BDI_registered, &bdi->state))
+ queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
+ spin_unlock_bh(&bdi->wb_lock);
}
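The new comment is the heart of the change: mod_delayed_work() re-arms the timer unconditionally and can push back work already scheduled for an earlier deadline, whereas queue_delayed_work() is a no-op while the work is still pending, so an earlier wake-up is never postponed. A toy model of the two semantics (plain structs, not the workqueue API):

#include <stdbool.h>
#include <stdio.h>

struct dwork { bool pending; long expires; };

/* mod-style: always (re)arm, possibly postponing an earlier deadline */
static void mod_delayed(struct dwork *w, long when)
{
	w->pending = true;
	w->expires = when;
}

/* queue-style: only arm if not already pending */
static bool queue_delayed(struct dwork *w, long when)
{
	if (w->pending)
		return false;
	w->pending = true;
	w->expires = when;
	return true;
}

int main(void)
{
	struct dwork a = { true, 100 }, b = { true, 100 };

	mod_delayed(&a, 500);	/* earlier flush pushed back to t=500 */
	queue_delayed(&b, 500);	/* earlier flush at t=100 preserved */
	printf("mod: %ld  queue: %ld\n", a.expires, b.expires);
	return 0;
}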
/*
@@ -307,9 +313,6 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
spin_unlock_bh(&bdi_lock);
synchronize_rcu_expedited();
-
- /* bdi_list is now unused, clear it to mark @bdi dying */
- INIT_LIST_HEAD(&bdi->bdi_list);
}
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -360,6 +363,11 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
*/
bdi_remove_from_list(bdi);
+ /* Make sure nobody queues further work */
+ spin_lock_bh(&bdi->wb_lock);
+ clear_bit(BDI_registered, &bdi->state);
+ spin_unlock_bh(&bdi->wb_lock);
+
/*
* Drain the work list and shut down the delayed_work. At this point,
* @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
diff --git a/mm/bounce.c b/mm/bounce.c
index b09bb4e..5a7d58f 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -51,11 +51,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
unsigned long flags;
unsigned char *vto;
- local_irq_save_nort(flags);
+ local_irq_save(flags);
vto = kmap_atomic(to->bv_page);
memcpy(vto + to->bv_offset, vfrom, to->bv_len);
kunmap_atomic(vto);
- local_irq_restore_nort(flags);
+ local_irq_restore(flags);
}
#else /* CONFIG_HIGHMEM */
diff --git a/mm/filemap.c b/mm/filemap.c
index 3d2d39a..ae4846f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1976,7 +1976,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
char *kaddr;
size_t copied;
- BUG_ON(!pagefault_disabled());
+ BUG_ON(!in_atomic());
kaddr = kmap_atomic(page);
if (likely(i->nr_segs == 1)) {
int left;
diff --git a/mm/highmem.c b/mm/highmem.c
index b1c7d43..b32b70c 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -29,11 +29,10 @@
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
-#ifndef CONFIG_PREEMPT_RT_FULL
+
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif
-#endif
/*
* Virtual_count is not a pure "count".
@@ -48,9 +47,8 @@ DEFINE_PER_CPU(int, __kmap_atomic_idx);
unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);
-#ifndef CONFIG_PREEMPT_RT_FULL
+
EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
-#endif
unsigned int nr_free_highpages (void)
{
diff --git a/mm/ksm.c b/mm/ksm.c
index 0bea2b2..c78fff1 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -444,7 +444,7 @@ static void break_cow(struct rmap_item *rmap_item)
static struct page *page_trans_compound_anon(struct page *page)
{
if (PageTransCompound(page)) {
- struct page *head = compound_trans_head(page);
+ struct page *head = compound_head(page);
/*
* head may actually be split and freed from under
* us but it's ok here.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bc16ebc..15429b9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2473,7 +2473,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
- curcpu = get_cpu_light();
+ curcpu = get_cpu();
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
@@ -2490,7 +2490,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
schedule_work_on(cpu, &stock->work);
}
}
- put_cpu_light();
+ put_cpu();
if (!sync)
goto out;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index de476c2..5ea3cf7 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1659,7 +1659,7 @@ int soft_offline_page(struct page *page, int flags)
{
int ret;
unsigned long pfn = page_to_pfn(page);
- struct page *hpage = compound_trans_head(page);
+ struct page *hpage = compound_head(page);
if (PageHWPoison(page)) {
pr_info("soft offline: %#lx page already poisoned\n", pfn);
diff --git a/mm/memory.c b/mm/memory.c
index 0dcdc84..22e67a2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3742,32 +3742,6 @@ unlock:
return 0;
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-void pagefault_disable(void)
-{
- migrate_disable();
- current->pagefault_disabled++;
- /*
- * make sure to have issued the store before a pagefault
- * can hit.
- */
- barrier();
-}
-EXPORT_SYMBOL(pagefault_disable);
-
-void pagefault_enable(void)
-{
- /*
- * make sure to issue those last loads/stores before enabling
- * the pagefault handler again.
- */
- barrier();
- current->pagefault_disabled--;
- migrate_enable();
-}
-EXPORT_SYMBOL(pagefault_enable);
-#endif
-
/*
* By the time we get here, we already hold the mm semaphore
*/
@@ -4344,35 +4318,3 @@ void copy_user_huge_page(struct page *dst, struct page *src,
}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-
-#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0)
-/*
- * Heinous hack, relies on the caller doing something like:
- *
- * pte = alloc_pages(PGALLOC_GFP, 0);
- * if (pte)
- * pgtable_page_ctor(pte);
- * return pte;
- *
- * This ensures we release the page and return NULL when the
- * lock allocation fails.
- */
-struct page *pte_lock_init(struct page *page)
-{
- page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
- if (page->ptl) {
- spin_lock_init(__pte_lockptr(page));
- } else {
- __free_page(page);
- page = NULL;
- }
- return page;
-}
-
-void pte_lock_deinit(struct page *page)
-{
- kfree(page->ptl);
- page->mapping = NULL;
-}
-
-#endif
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index adfce87..8a8cd02 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -23,7 +23,6 @@ void use_mm(struct mm_struct *mm)
struct task_struct *tsk = current;
task_lock(tsk);
- preempt_disable_rt();
active_mm = tsk->active_mm;
if (active_mm != mm) {
atomic_inc(&mm->mm_count);
@@ -31,7 +30,6 @@ void use_mm(struct mm_struct *mm)
}
tsk->mm = mm;
switch_mm(active_mm, mm, tsk);
- preempt_enable_rt();
task_unlock(tsk);
if (active_mm != mm)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f200af2..6fca390 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,7 +61,6 @@
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
-#include <linux/locallock.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -231,18 +230,6 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
-static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
-
-#ifdef CONFIG_PREEMPT_RT_BASE
-# define cpu_lock_irqsave(cpu, flags) \
- local_lock_irqsave_on(pa_lock, flags, cpu)
-# define cpu_unlock_irqrestore(cpu, flags) \
- local_unlock_irqrestore_on(pa_lock, flags, cpu)
-#else
-# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
-# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
-#endif
-
int page_group_by_mobility_disabled __read_mostly;
void set_pageblock_migratetype(struct page *page, int migratetype)
@@ -382,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
- __SetPageTail(p);
set_page_count(p, 0);
p->first_page = page;
+ /* Make sure p->first_page is always valid for PageTail() */
+ smp_wmb();
+ __SetPageTail(p);
}
}
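The smp_wmb() matters because a lockless reader may test PageTail() and then dereference p->first_page; the barrier guarantees the pointer store is visible before the tail flag is. In portable C11 terms this is a release store paired with an acquire load, sketched below (the struct is a stand-in, not struct page):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_page { struct fake_page *first_page; atomic_bool tail; };

static struct fake_page head, tail_page;

static void *writer(void *arg)
{
	(void)arg;
	tail_page.first_page = &head;			/* plain store */
	atomic_store_explicit(&tail_page.tail, true,
			      memory_order_release);	/* "wmb + set tail" */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);
	/* The acquire load of the flag orders the first_page read after it,
	 * mirroring the PageTail()-then-first_page reader. */
	while (!atomic_load_explicit(&tail_page.tail, memory_order_acquire))
		;
	printf("first_page points at head: %d\n",
	       tail_page.first_page == &head);
	pthread_join(t, NULL);
	return 0;
}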
@@ -646,7 +635,7 @@ static inline int free_pages_check(struct page *page)
}
/*
- * Frees a number of pages which have been collected from the pcp lists.
+ * Frees a number of pages from the PCP lists
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
@@ -657,49 +646,15 @@ static inline int free_pages_check(struct page *page)
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
- struct list_head *list)
+ struct per_cpu_pages *pcp)
{
+ int migratetype = 0;
+ int batch_free = 0;
int to_free = count;
- unsigned long flags;
- spin_lock_irqsave(&zone->lock, flags);
+ spin_lock(&zone->lock);
zone->pages_scanned = 0;
- while (!list_empty(list)) {
- struct page *page = list_first_entry(list, struct page, lru);
- int mt; /* migratetype of the to-be-freed page */
-
- /* must delete as __free_one_page list manipulates */
- list_del(&page->lru);
-
- mt = get_freepage_migratetype(page);
- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
- __free_one_page(page, zone, 0, mt);
- trace_mm_page_pcpu_drain(page, 0, mt);
- if (likely(!is_migrate_isolate_page(page))) {
- __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
- if (is_migrate_cma(mt))
- __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
- }
-
- to_free--;
- }
- WARN_ON(to_free != 0);
- spin_unlock_irqrestore(&zone->lock, flags);
-}
-
-/*
- * Moves a number of pages from the PCP lists to free list which
- * is freed outside of the locked region.
- *
- * Assumes all pages on list are in same zone, and of same order.
- * count is the number of pages to free.
- */
-static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
- struct list_head *dst)
-{
- int migratetype = 0, batch_free = 0;
-
while (to_free) {
struct page *page;
struct list_head *list;
@@ -715,7 +670,7 @@ static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
- list = &src->lists[migratetype];
+ list = &pcp->lists[migratetype];
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
@@ -723,25 +678,35 @@ static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
batch_free = to_free;
do {
- page = list_last_entry(list, struct page, lru);
+ int mt; /* migratetype of the to-be-freed page */
+
+ page = list_entry(list->prev, struct page, lru);
+ /* must delete as __free_one_page list manipulates */
list_del(&page->lru);
- list_add(&page->lru, dst);
+ mt = get_freepage_migratetype(page);
+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+ __free_one_page(page, zone, 0, mt);
+ trace_mm_page_pcpu_drain(page, 0, mt);
+ if (likely(!is_migrate_isolate_page(page))) {
+ __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
+ if (is_migrate_cma(mt))
+ __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
+ }
} while (--to_free && --batch_free && !list_empty(list));
}
+ spin_unlock(&zone->lock);
}
static void free_one_page(struct zone *zone, struct page *page, int order,
int migratetype)
{
- unsigned long flags;
-
- spin_lock_irqsave(&zone->lock, flags);
+ spin_lock(&zone->lock);
zone->pages_scanned = 0;
__free_one_page(page, zone, order, migratetype);
if (unlikely(!is_migrate_isolate(migratetype)))
__mod_zone_freepage_state(zone, 1 << order, migratetype);
- spin_unlock_irqrestore(&zone->lock, flags);
+ spin_unlock(&zone->lock);
}
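The restored free_pcppages_bulk() above drains count pages while round-robining over the MIGRATE_PCPTYPES lists: batch_free grows for every empty list skipped, so fuller lists give up more pages per turn, and once every other list is empty the last one is drained outright. The selection logic in isolation (list contents illustrative):

#include <stdio.h>

#define NTYPES 3

int main(void)
{
	int lists[NTYPES] = { 5, 0, 2 };	/* pages per migratetype list */
	int to_free = 6, migratetype = 0, batch_free = 0;

	while (to_free) {
		/* Skip empty lists, escalating the batch size as we go. */
		do {
			batch_free++;
			if (++migratetype == NTYPES)
				migratetype = 0;
		} while (lists[migratetype] == 0);

		/* Only one non-empty list left: drain it completely. */
		if (batch_free == NTYPES)
			batch_free = to_free;

		do {
			lists[migratetype]--;
			printf("freed one page from list %d\n", migratetype);
		} while (--to_free && --batch_free && lists[migratetype]);
	}
	return 0;
}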
static bool free_pages_prepare(struct page *page, unsigned int order)
@@ -779,12 +744,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
if (!free_pages_prepare(page, order))
return;
- local_lock_irqsave(pa_lock, flags);
+ local_irq_save(flags);
__count_vm_events(PGFREE, 1 << order);
migratetype = get_pageblock_migratetype(page);
set_freepage_migratetype(page, migratetype);
free_one_page(page_zone(page), page, order, migratetype);
- local_unlock_irqrestore(pa_lock, flags);
+ local_irq_restore(flags);
}
void __init __free_pages_bootmem(struct page *page, unsigned int order)
@@ -1239,22 +1204,20 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
- LIST_HEAD(dst);
int to_drain;
unsigned long batch;
- local_lock_irqsave(pa_lock, flags);
+ local_irq_save(flags);
batch = ACCESS_ONCE(pcp->batch);
if (pcp->count >= batch)
to_drain = batch;
else
to_drain = pcp->count;
if (to_drain > 0) {
- isolate_pcp_pages(to_drain, pcp, &dst);
+ free_pcppages_bulk(zone, to_drain, pcp);
pcp->count -= to_drain;
}
- local_unlock_irqrestore(pa_lock, flags);
- free_pcppages_bulk(zone, to_drain, &dst);
+ local_irq_restore(flags);
}
static bool gfp_thisnode_allocation(gfp_t gfp_mask)
{
@@ -1282,21 +1245,16 @@ static void drain_pages(unsigned int cpu)
for_each_populated_zone(zone) {
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- LIST_HEAD(dst);
- int count;
- cpu_lock_irqsave(cpu, flags);
+ local_irq_save(flags);
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
- count = pcp->count;
- if (count) {
- isolate_pcp_pages(count, pcp, &dst);
+ if (pcp->count) {
+ free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
- cpu_unlock_irqrestore(cpu, flags);
- if (count)
- free_pcppages_bulk(zone, count, &dst);
+ local_irq_restore(flags);
}
}
@@ -1349,12 +1307,7 @@ void drain_all_pages(void)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
-#ifndef CONFIG_PREEMPT_RT_BASE
on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
-#else
- for_each_cpu(cpu, &cpus_with_pcps)
- drain_pages(cpu);
-#endif
}
#ifdef CONFIG_HIBERNATION
@@ -1409,7 +1362,7 @@ void free_hot_cold_page(struct page *page, int cold)
migratetype = get_pageblock_migratetype(page);
set_freepage_migratetype(page, migratetype);
- local_lock_irqsave(pa_lock, flags);
+ local_irq_save(flags);
__count_vm_event(PGFREE);
/*
@@ -1435,17 +1388,12 @@ void free_hot_cold_page(struct page *page, int cold)
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = ACCESS_ONCE(pcp->batch);
- LIST_HEAD(dst);
-
- isolate_pcp_pages(batch, pcp, &dst);
+ free_pcppages_bulk(zone, batch, pcp);
pcp->count -= batch;
- local_unlock_irqrestore(pa_lock, flags);
- free_pcppages_bulk(zone, batch, &dst);
- return;
}
out:
- local_unlock_irqrestore(pa_lock, flags);
+ local_irq_restore(flags);
}
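On this path the per-CPU cache absorbs frees until pcp->count reaches pcp->high, at which point one batch is handed back to the buddy allocator; after the revert that happens inside the same IRQ-off section rather than via a collected local list. The threshold behaviour on a toy cache (numbers illustrative):

#include <stdio.h>

struct pcp { int count, high, batch; };

/* Flush one batch back to the "buddy allocator" (here just a counter). */
static int flush_batch(struct pcp *p)
{
	p->count -= p->batch;
	return p->batch;
}

int main(void)
{
	struct pcp pcp = { .count = 0, .high = 6, .batch = 4 };
	int freed_to_buddy = 0;

	for (int i = 0; i < 20; i++) {
		pcp.count++;			/* the free fast path */
		if (pcp.count >= pcp.high)
			freed_to_buddy += flush_batch(&pcp);
	}
	printf("cached=%d returned=%d\n", pcp.count, freed_to_buddy);
	return 0;
}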
/*
@@ -1575,7 +1523,7 @@ again:
struct per_cpu_pages *pcp;
struct list_head *list;
- local_lock_irqsave(pa_lock, flags);
+ local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
@@ -1607,15 +1555,13 @@ again:
*/
WARN_ON_ONCE(order > 1);
}
- local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
+ spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
- if (!page) {
- spin_unlock(&zone->lock);
+ spin_unlock(&zone->lock);
+ if (!page)
goto failed;
- }
__mod_zone_freepage_state(zone, -(1 << order),
get_pageblock_migratetype(page));
- spin_unlock(&zone->lock);
}
/*
@@ -1627,7 +1573,7 @@ again:
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
- local_unlock_irqrestore(pa_lock, flags);
+ local_irq_restore(flags);
VM_BUG_ON(bad_range(zone, page));
if (prep_new_page(page, order, gfp_flags))
@@ -1635,7 +1581,7 @@ again:
return page;
failed:
- local_unlock_irqrestore(pa_lock, flags);
+ local_irq_restore(flags);
return NULL;
}
@@ -2314,8 +2260,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct page *page;
/* Page migration frees to the PCP lists but we want merging */
- drain_pages(get_cpu_light());
- put_cpu_light();
+ drain_pages(get_cpu());
+ put_cpu();
page = get_page_from_freelist(gfp_mask, nodemask,
order, zonelist, high_zoneidx,
@@ -5517,7 +5463,6 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
- local_irq_lock_init(pa_lock);
}
/*
@@ -6381,7 +6326,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
- local_lock_irqsave(pa_lock, flags);
+ local_irq_save(flags);
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
@@ -6390,7 +6335,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
- local_unlock_irqrestore(pa_lock, flags);
+ local_irq_restore(flags);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 98caeee..6d757e3a 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -13,14 +13,6 @@
static unsigned long total_usage;
-static void page_cgroup_lock_init(struct page_cgroup *pc, int nr_pages)
-{
-#ifdef CONFIG_PREEMPT_RT_BASE
- for (; nr_pages; nr_pages--, pc++)
- spin_lock_init(&pc->pcg_lock);
-#endif
-}
-
#if !defined(CONFIG_SPARSEMEM)
@@ -68,7 +60,6 @@ static int __init alloc_node_page_cgroup(int nid)
return -ENOMEM;
NODE_DATA(nid)->node_page_cgroup = base;
total_usage += table_size;
- page_cgroup_lock_init(base, nr_pages);
return 0;
}
@@ -159,8 +150,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
return -ENOMEM;
}
- page_cgroup_lock_init(base, PAGES_PER_SECTION);
-
/*
* The passed "pfn" may not be aligned to SECTION. For the calculation
* we need to apply a mask.
diff --git a/mm/slab.h b/mm/slab.h
index 8ffb287..a535033 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -247,11 +247,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
* The slab lists for all objects.
*/
struct kmem_cache_node {
-#ifdef CONFIG_SLUB
- raw_spinlock_t list_lock;
-#else
spinlock_t list_lock;
-#endif
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
diff --git a/mm/slub.c b/mm/slub.c
index a164648..5c1343a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1087,7 +1087,7 @@ static noinline struct kmem_cache_node *free_debug_processing(
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
- raw_spin_lock_irqsave(&n->list_lock, *flags);
+ spin_lock_irqsave(&n->list_lock, *flags);
slab_lock(page);
if (!check_slab(s, page))
@@ -1135,7 +1135,7 @@ out:
fail:
slab_unlock(page);
- raw_spin_unlock_irqrestore(&n->list_lock, *flags);
+ spin_unlock_irqrestore(&n->list_lock, *flags);
slab_fix(s, "Object at 0x%p not freed", object);
return NULL;
}
@@ -1270,12 +1270,6 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
#endif /* CONFIG_SLUB_DEBUG */
-struct slub_free_list {
- raw_spinlock_t lock;
- struct list_head list;
-};
-static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
-
/*
* Slab allocation and freeing
*/
@@ -1297,15 +1291,10 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;
- bool enableirqs;
flags &= gfp_allowed_mask;
- enableirqs = (flags & __GFP_WAIT) != 0;
-#ifdef CONFIG_PREEMPT_RT_FULL
- enableirqs |= system_state == SYSTEM_RUNNING;
-#endif
- if (enableirqs)
+ if (flags & __GFP_WAIT)
local_irq_enable();
flags |= s->allocflags;
@@ -1345,7 +1334,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
kmemcheck_mark_unallocated_pages(page, pages);
}
- if (enableirqs)
+ if (flags & __GFP_WAIT)
local_irq_disable();
if (!page)
return NULL;
@@ -1363,10 +1352,8 @@ static void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
-#ifndef CONFIG_PREEMPT_RT_FULL
if (unlikely(s->ctor))
s->ctor(object);
-#endif
}
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1444,16 +1431,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__free_memcg_kmem_pages(page, order);
}
-static void free_delayed(struct list_head *h)
-{
- while(!list_empty(h)) {
- struct page *page = list_first_entry(h, struct page, lru);
-
- list_del(&page->lru);
- __free_slab(page->slab_cache, page);
- }
-}
-
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
@@ -1488,12 +1465,6 @@ static void free_slab(struct kmem_cache *s, struct page *page)
}
call_rcu(head, rcu_free_slab);
- } else if (irqs_disabled()) {
- struct slub_free_list *f = &__get_cpu_var(slub_free_list);
-
- raw_spin_lock(&f->lock);
- list_add(&page->lru, &f->list);
- raw_spin_unlock(&f->lock);
} else
__free_slab(s, page);
}
@@ -1598,7 +1569,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
if (!n || !n->nr_partial)
return NULL;
- raw_spin_lock(&n->list_lock);
+ spin_lock(&n->list_lock);
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
@@ -1623,7 +1594,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
break;
}
- raw_spin_unlock(&n->list_lock);
+ spin_unlock(&n->list_lock);
return object;
}
@@ -1866,7 +1837,7 @@ redo:
* that acquire_slab() will see a slab page that
* is frozen
*/
- raw_spin_lock(&n->list_lock);
+ spin_lock(&n->list_lock);
}
} else {
m = M_FULL;
@@ -1877,7 +1848,7 @@ redo:
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
- raw_spin_lock(&n->list_lock);
+ spin_lock(&n->list_lock);
}
}
@@ -1912,7 +1883,7 @@ redo:
goto redo;
if (lock)
- raw_spin_unlock(&n->list_lock);
+ spin_unlock(&n->list_lock);
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
@@ -1944,10 +1915,10 @@ static void unfreeze_partials(struct kmem_cache *s,
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
- raw_spin_unlock(&n->list_lock);
+ spin_unlock(&n->list_lock);
n = n2;
- raw_spin_lock(&n->list_lock);
+ spin_lock(&n->list_lock);
}
do {
@@ -1976,7 +1947,7 @@ static void unfreeze_partials(struct kmem_cache *s,
}
if (n)
- raw_spin_unlock(&n->list_lock);
+ spin_unlock(&n->list_lock);
while (discard_page) {
page = discard_page;
@@ -2014,21 +1985,14 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
- struct slub_free_list *f;
unsigned long flags;
- LIST_HEAD(tofree);
/*
* partial array is full. Move the existing
* set to the per node partial list.
*/
local_irq_save(flags);
unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
- f = &__get_cpu_var(slub_free_list);
- raw_spin_lock(&f->lock);
- list_splice_init(&f->list, &tofree);
- raw_spin_unlock(&f->lock);
local_irq_restore(flags);
- free_delayed(&tofree);
oldpage = NULL;
pobjects = 0;
pages = 0;
@@ -2092,22 +2056,7 @@ static bool has_cpu_slab(int cpu, void *info)
static void flush_all(struct kmem_cache *s)
{
- LIST_HEAD(tofree);
- int cpu;
-
on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
- for_each_online_cpu(cpu) {
- struct slub_free_list *f;
-
- if (!has_cpu_slab(cpu, s))
- continue;
-
- f = &per_cpu(slub_free_list, cpu);
- raw_spin_lock_irq(&f->lock);
- list_splice_init(&f->list, &tofree);
- raw_spin_unlock_irq(&f->lock);
- free_delayed(&tofree);
- }
}
/*
@@ -2135,10 +2084,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
unsigned long x = 0;
struct page *page;
- raw_spin_lock_irqsave(&n->list_lock, flags);
+ spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
x += get_count(page);
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
@@ -2281,11 +2230,9 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
- struct slub_free_list *f;
void *freelist;
struct page *page;
unsigned long flags;
- LIST_HEAD(tofree);
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
@@ -2348,13 +2295,7 @@ load_freelist:
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
-out:
- f = &__get_cpu_var(slub_free_list);
- raw_spin_lock(&f->lock);
- list_splice_init(&f->list, &tofree);
- raw_spin_unlock(&f->lock);
local_irq_restore(flags);
- free_delayed(&tofree);
return freelist;
new_slab:
@@ -2372,7 +2313,9 @@ new_slab:
if (unlikely(!freelist)) {
if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
slab_out_of_memory(s, gfpflags, node);
- goto out;
+
+ local_irq_restore(flags);
+ return NULL;
}
page = c->page;
@@ -2387,7 +2330,8 @@ new_slab:
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
- goto out;
+ local_irq_restore(flags);
+ return freelist;
}
/*
@@ -2472,10 +2416,6 @@ redo:
if (unlikely(gfpflags & __GFP_ZERO) && object)
memset(object, 0, s->object_size);
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (unlikely(s->ctor) && object)
- s->ctor(object);
-#endif
slab_post_alloc_hook(s, gfpflags, object);
@@ -2563,7 +2503,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
do {
if (unlikely(n)) {
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
n = NULL;
}
prior = page->freelist;
@@ -2595,7 +2535,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
- raw_spin_lock_irqsave(&n->list_lock, flags);
+ spin_lock_irqsave(&n->list_lock, flags);
}
}
@@ -2637,7 +2577,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
return;
slab_empty:
@@ -2651,7 +2591,7 @@ slab_empty:
/* Slab must be on the full list */
remove_full(s, page);
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
discard_slab(s, page);
}
@@ -2853,7 +2793,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
- raw_spin_lock_init(&n->list_lock);
+ spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
@@ -3439,7 +3379,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
for (i = 0; i < objects; i++)
INIT_LIST_HEAD(slabs_by_inuse + i);
- raw_spin_lock_irqsave(&n->list_lock, flags);
+ spin_lock_irqsave(&n->list_lock, flags);
/*
* Build lists indexed by the items in use in each slab.
@@ -3460,7 +3400,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
for (i = objects - 1; i > 0; i--)
list_splice(slabs_by_inuse + i, n->partial.prev);
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
/* Release empty slabs */
list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
@@ -3636,12 +3576,6 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
- int cpu;
-
- for_each_possible_cpu(cpu) {
- raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
- INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
- }
if (debug_guardpage_minorder())
slub_max_order = 0;
@@ -3946,7 +3880,7 @@ static int validate_slab_node(struct kmem_cache *s,
struct page *page;
unsigned long flags;
- raw_spin_lock_irqsave(&n->list_lock, flags);
+ spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
@@ -3969,7 +3903,7 @@ static int validate_slab_node(struct kmem_cache *s,
atomic_long_read(&n->nr_slabs));
out:
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
return count;
}
@@ -4159,12 +4093,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
if (!atomic_long_read(&n->nr_slabs))
continue;
- raw_spin_lock_irqsave(&n->list_lock, flags);
+ spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
process_slab(&t, s, page, alloc, map);
list_for_each_entry(page, &n->full, lru)
process_slab(&t, s, page, alloc, map);
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
}
for (i = 0; i < t.count; i++) {
diff --git a/mm/swap.c b/mm/swap.c
index b55ee9c..aa4da5d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -32,7 +32,6 @@
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
-#include <linux/locallock.h>
#include "internal.h"
@@ -46,9 +45,6 @@ static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
-static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
-static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
-
/*
* This path almost never happens for VM activity - pages are normally
* freed via pagevecs. But it gets used by networking.
@@ -88,7 +84,7 @@ static void put_compound_page(struct page *page)
{
if (unlikely(PageTail(page))) {
/* __split_huge_page_refcount can run under us */
- struct page *page_head = compound_trans_head(page);
+ struct page *page_head = compound_head(page);
if (likely(page != page_head &&
get_page_unless_zero(page_head))) {
@@ -226,7 +222,7 @@ bool __get_page_tail(struct page *page)
*/
unsigned long flags;
bool got = false;
- struct page *page_head = compound_trans_head(page);
+ struct page *page_head = compound_head(page);
if (likely(page != page_head && get_page_unless_zero(page_head))) {
/* Ref to put_compound_page() comment. */
@@ -412,11 +408,11 @@ void rotate_reclaimable_page(struct page *page)
unsigned long flags;
page_cache_get(page);
- local_lock_irqsave(rotate_lock, flags);
+ local_irq_save(flags);
pvec = &__get_cpu_var(lru_rotate_pvecs);
if (!pagevec_add(pvec, page))
pagevec_move_tail(pvec);
- local_unlock_irqrestore(rotate_lock, flags);
+ local_irq_restore(flags);
}
}
@@ -467,13 +463,12 @@ static bool need_activate_page_drain(int cpu)
void activate_page(struct page *page)
{
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- struct pagevec *pvec = &get_locked_var(swapvec_lock,
- activate_page_pvecs);
+ struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
page_cache_get(page);
if (!pagevec_add(pvec, page))
pagevec_lru_move_fn(pvec, __activate_page, NULL);
- put_locked_var(swapvec_lock, activate_page_pvecs);
+ put_cpu_var(activate_page_pvecs);
}
}
@@ -499,7 +494,7 @@ void activate_page(struct page *page)
static void __lru_cache_activate_page(struct page *page)
{
- struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
+ struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
int i;
/*
@@ -521,7 +516,7 @@ static void __lru_cache_activate_page(struct page *page)
}
}
- put_locked_var(swapvec_lock, lru_add_pvec);
+ put_cpu_var(lru_add_pvec);
}
/*
@@ -561,13 +556,13 @@ EXPORT_SYMBOL(mark_page_accessed);
*/
void __lru_cache_add(struct page *page)
{
- struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
+ struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
page_cache_get(page);
if (!pagevec_space(pvec))
__pagevec_lru_add(pvec);
pagevec_add(pvec, page);
- put_locked_var(swapvec_lock, lru_add_pvec);
+ put_cpu_var(lru_add_pvec);
}
EXPORT_SYMBOL(__lru_cache_add);
@@ -690,9 +685,9 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
- local_lock_irqsave(rotate_lock, flags);
+ local_irq_save(flags);
pagevec_move_tail(pvec);
- local_unlock_irqrestore(rotate_lock, flags);
+ local_irq_restore(flags);
}
pvec = &per_cpu(lru_deactivate_pvecs, cpu);
@@ -720,19 +715,18 @@ void deactivate_page(struct page *page)
return;
if (likely(get_page_unless_zero(page))) {
- struct pagevec *pvec = &get_locked_var(swapvec_lock,
- lru_deactivate_pvecs);
+ struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
if (!pagevec_add(pvec, page))
pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
- put_locked_var(swapvec_lock, lru_deactivate_pvecs);
+ put_cpu_var(lru_deactivate_pvecs);
}
}
void lru_add_drain(void)
{
- lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
- local_unlock_cpu(swapvec_lock);
+ lru_add_drain_cpu(get_cpu());
+ put_cpu();
}
static void lru_add_drain_per_cpu(struct work_struct *dummy)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d64289d..1074543 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -790,7 +790,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
- int node, err, cpu;
+ int node, err;
node = numa_node_id();
@@ -828,12 +828,11 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
BUG_ON(err);
radix_tree_preload_end();
- cpu = get_cpu_light();
- vbq = &__get_cpu_var(vmap_block_queue);
+ vbq = &get_cpu_var(vmap_block_queue);
spin_lock(&vbq->lock);
list_add_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
- put_cpu_light();
+ put_cpu_var(vmap_block_queue);
return vb;
}
@@ -901,7 +900,6 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
struct vmap_block *vb;
unsigned long addr = 0;
unsigned int order;
- int cpu = 0;
BUG_ON(size & ~PAGE_MASK);
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
@@ -917,8 +915,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
again:
rcu_read_lock();
- cpu = get_cpu_light();
- vbq = &__get_cpu_var(vmap_block_queue);
+ vbq = &get_cpu_var(vmap_block_queue);
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
int i;
@@ -942,7 +939,7 @@ next:
spin_unlock(&vb->lock);
}
- put_cpu_light();
+ put_cpu_var(vmap_block_queue);
rcu_read_unlock();
if (!addr) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index efea337..5a442a7 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -217,7 +217,6 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
long x;
long t;
- preempt_disable_rt();
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
@@ -227,7 +226,6 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
x = 0;
}
__this_cpu_write(*p, x);
- preempt_enable_rt();
}
EXPORT_SYMBOL(__mod_zone_page_state);
@@ -260,7 +258,6 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
- preempt_disable_rt();
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
@@ -269,7 +266,6 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
}
- preempt_enable_rt();
}
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -284,7 +280,6 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
- preempt_disable_rt();
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
@@ -293,7 +288,6 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
}
- preempt_enable_rt();
}
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 61fc573..856499f 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -307,9 +307,11 @@ static void vlan_sync_address(struct net_device *dev,
static void vlan_transfer_features(struct net_device *dev,
struct net_device *vlandev)
{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
+
vlandev->gso_max_size = dev->gso_max_size;
- if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
vlandev->hard_header_len = dev->hard_header_len;
else
vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
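The vlan_hw_offload_capable() test decides whether the 4-byte 802.1Q tag is inserted by the NIC or built in software; only in the latter case is VLAN_HLEN added to hard_header_len. A sketch of that computation, with a made-up capability encoding standing in for the real feature/protocol check:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_HLEN 4
#define ETH_HLEN  14

/* Illustrative stand-in for vlan_hw_offload_capable(features, proto). */
static bool hw_can_tag(unsigned int features, unsigned int proto)
{
	return (features & (1u << proto)) != 0;
}

static int vlan_header_len(int real_hlen, unsigned int features,
			   unsigned int proto)
{
	if (hw_can_tag(features, proto))
		return real_hlen;		/* tag inserted by the NIC */
	return real_hlen + VLAN_HLEN;		/* tag built in software */
}

int main(void)
{
	printf("offload:  %d bytes\n", vlan_header_len(ETH_HLEN, 0x2, 1));
	printf("software: %d bytes\n", vlan_header_len(ETH_HLEN, 0x0, 1));
	return 0;
}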
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index edf44d0..d1537dc 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -557,6 +557,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
+ if (saddr == NULL)
+ saddr = dev->dev_addr;
+
return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
}
@@ -608,7 +611,8 @@ static int vlan_dev_init(struct net_device *dev)
#endif
dev->needed_headroom = real_dev->needed_headroom;
- if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
+ if (vlan_hw_offload_capable(real_dev->features,
+ vlan_dev_priv(dev)->vlan_proto)) {
dev->header_ops = &vlan_passthru_header_ops;
dev->hard_header_len = real_dev->hard_header_len;
} else {
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 8db3e89..a3af2b7 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3565,7 +3565,13 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
- if (ltk->type & HCI_SMP_STK) {
+ /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
+ * temporary key used to encrypt a connection following
+ * pairing. It is used during the Encrypted Session Setup to
+ * distribute the keys. Later, security can be re-established
+ * using a distributed LTK.
+ */
+ if (ltk->type == HCI_SMP_STK_SLAVE) {
list_del(&ltk->list);
kfree(ltk);
}
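The switch from a bitmask test to an equality test is the whole fix: with the old ltk->type & HCI_SMP_STK, any key type sharing those bits (long-term keys included) matched and was freed, while == HCI_SMP_STK_SLAVE removes only the temporary slave STK. A standalone illustration (the constants mirror the failure mode but are stand-ins, not the real HCI values):

#include <stdio.h>

enum { KEY_STK = 0x80, KEY_STK_SLAVE = 0x81, KEY_LTK = 0x82 };

int main(void)
{
	int types[] = { KEY_STK, KEY_STK_SLAVE, KEY_LTK };

	for (int i = 0; i < 3; i++) {
		int t = types[i];
		/* mask-match wrongly fires for all three types */
		printf("type %#x: mask-match=%d exact-match=%d\n",
		       t, !!(t & KEY_STK), t == KEY_STK_SLAVE);
	}
	return 0;
}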
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 1b148a3..162d6c7 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1129,9 +1129,10 @@ static void br_multicast_query_received(struct net_bridge *br,
struct net_bridge_port *port,
struct bridge_mcast_querier *querier,
int saddr,
+ bool is_general_query,
unsigned long max_delay)
{
- if (saddr)
+ if (saddr && is_general_query)
br_multicast_update_querier_timer(br, querier, max_delay);
else if (timer_pending(&querier->timer))
return;
@@ -1183,8 +1184,16 @@ static int br_ip4_multicast_query(struct net_bridge *br,
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
}
+ /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
+ * all-systems destination address (224.0.0.1) for general queries
+ */
+ if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
+ err = -EINVAL;
+ goto out;
+ }
+
br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
- max_delay);
+ !group, max_delay);
if (!group)
goto out;
@@ -1230,6 +1239,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
unsigned long max_delay;
unsigned long now = jiffies;
const struct in6_addr *group = NULL;
+ bool is_general_query;
int err = 0;
u16 vid = 0;
@@ -1238,6 +1248,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
+ /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
+ if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
+ err = -EINVAL;
+ goto out;
+ }
+
if (skb->len == sizeof(*mld)) {
if (!pskb_may_pull(skb, sizeof(*mld))) {
err = -EINVAL;
@@ -1259,8 +1275,19 @@ static int br_ip6_multicast_query(struct net_bridge *br,
max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
}
+ is_general_query = group && ipv6_addr_any(group);
+
+ /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
+ * all-nodes destination address (ff02::1) for general queries
+ */
+ if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
+ err = -EINVAL;
+ goto out;
+ }
+
br_multicast_query_received(br, port, &br->ip6_querier,
- !ipv6_addr_any(&ip6h->saddr), max_delay);
+ !ipv6_addr_any(&ip6h->saddr),
+ is_general_query, max_delay);
if (!group)
goto out;
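Both hunks enforce the same RFC rule: a general query (one naming no group) is only acceptable when sent to the link-scope all-hosts (224.0.0.1) or all-nodes (ff02::1) address, so a query unicast to a single host can no longer reset the bridge's querier state for everyone. The IPv4 side of the check as a runnable sketch (the helper name is illustrative):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#ifndef INADDR_ALLHOSTS_GROUP
#define INADDR_ALLHOSTS_GROUP 0xe0000001UL	/* 224.0.0.1 */
#endif

/* Returns 0 if a query's destination is acceptable, -1 (EINVAL-style)
 * otherwise. 'group' is zero for a general query. */
static int check_v4_general_query(uint32_t group, uint32_t daddr)
{
	if (!group && daddr != htonl(INADDR_ALLHOSTS_GROUP))
		return -1;
	return 0;
}

int main(void)
{
	uint32_t allhosts = htonl(INADDR_ALLHOSTS_GROUP);
	uint32_t unicast;

	inet_pton(AF_INET, "192.0.2.7", &unicast);
	printf("to 224.0.0.1: %d\n", check_v4_general_query(0, allhosts));
	printf("to unicast:   %d\n", check_v4_general_query(0, unicast));
	return 0;
}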
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 4a5df7b..464303f 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -3126,7 +3126,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
INIT_LIST_HEAD(&m->data);
/* front */
- m->front_max = front_len;
if (front_len) {
if (front_len > PAGE_CACHE_SIZE) {
m->front.iov_base = __vmalloc(front_len, flags,
@@ -3143,7 +3142,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
} else {
m->front.iov_base = NULL;
}
- m->front.iov_len = front_len;
+ m->front_alloc_len = m->front.iov_len = front_len;
dout("ceph_msg_new %p front %d\n", m, front_len);
return m;
@@ -3301,8 +3300,8 @@ EXPORT_SYMBOL(ceph_msg_last_put);
void ceph_msg_dump(struct ceph_msg *msg)
{
- pr_debug("msg_dump %p (front_max %d length %zd)\n", msg,
- msg->front_max, msg->data_length);
+ pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
+ msg->front_alloc_len, msg->data_length);
print_hex_dump(KERN_DEBUG, "header: ",
DUMP_PREFIX_OFFSET, 16, 1,
&msg->hdr, sizeof(msg->hdr), true);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 1fe25cd..2ac9ef3 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -152,7 +152,7 @@ static int __open_session(struct ceph_mon_client *monc)
/* initiate authentication handshake */
ret = ceph_auth_build_hello(monc->auth,
monc->m_auth->front.iov_base,
- monc->m_auth->front_max);
+ monc->m_auth->front_alloc_len);
__send_prepared_auth_request(monc, ret);
} else {
dout("open_session mon%d already open\n", monc->cur_mon);
@@ -196,7 +196,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
int num;
p = msg->front.iov_base;
- end = p + msg->front_max;
+ end = p + msg->front_alloc_len;
num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
ceph_encode_32(&p, num);
@@ -897,7 +897,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
msg->front.iov_len,
monc->m_auth->front.iov_base,
- monc->m_auth->front_max);
+ monc->m_auth->front_alloc_len);
if (ret < 0) {
monc->client->auth_err = ret;
wake_up_all(&monc->client->auth_wq);
@@ -939,7 +939,7 @@ static int __validate_auth(struct ceph_mon_client *monc)
return 0;
ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
- monc->m_auth->front_max);
+ monc->m_auth->front_alloc_len);
if (ret <= 0)
return ret; /* either an error, or no need to authenticate */
__send_prepared_auth_request(monc, ret);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 2b4b32a..e6b2db6 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1232,6 +1232,22 @@ void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
EXPORT_SYMBOL(ceph_osdc_set_request_linger);
/*
+ * Returns whether a request should be blocked from being sent
+ * based on the current osdmap and osd_client settings.
+ *
+ * Caller should hold map_sem for read.
+ */
+static bool __req_should_be_paused(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req)
+{
+ bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
+ bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
+ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+ return (req->r_flags & CEPH_OSD_FLAG_READ && pauserd) ||
+ (req->r_flags & CEPH_OSD_FLAG_WRITE && pausewr);
+}
+
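The predicate combines global osdmap flags with the request's direction: reads pause under PAUSERD, writes under PAUSEWR or a FULL cluster, and __map_request() below records the transition so a request leaving the paused state is force-resent. The flag algebra on its own (flag values illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { MAP_PAUSERD = 1 << 0, MAP_PAUSEWR = 1 << 1, MAP_FULL = 1 << 2 };
enum { REQ_READ = 1 << 0, REQ_WRITE = 1 << 1 };

static bool req_should_be_paused(unsigned map_flags, unsigned req_flags)
{
	bool pauserd = map_flags & MAP_PAUSERD;
	bool pausewr = (map_flags & MAP_PAUSEWR) || (map_flags & MAP_FULL);

	return ((req_flags & REQ_READ) && pauserd) ||
	       ((req_flags & REQ_WRITE) && pausewr);
}

int main(void)
{
	printf("read  vs FULL:    %d\n", req_should_be_paused(MAP_FULL, REQ_READ));
	printf("write vs FULL:    %d\n", req_should_be_paused(MAP_FULL, REQ_WRITE));
	printf("read  vs PAUSERD: %d\n", req_should_be_paused(MAP_PAUSERD, REQ_READ));
	return 0;
}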
+/*
* Pick an osd (the first 'up' osd in the pg), allocate the osd struct
* (as needed), and set the request r_osd appropriately. If there is
* no up osd, set r_osd to NULL. Move the request to the appropriate list
@@ -1248,6 +1264,7 @@ static int __map_request(struct ceph_osd_client *osdc,
int acting[CEPH_PG_MAX_SIZE];
int o = -1, num = 0;
int err;
+ bool was_paused;
dout("map_request %p tid %lld\n", req, req->r_tid);
err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
@@ -1264,12 +1281,18 @@ static int __map_request(struct ceph_osd_client *osdc,
num = err;
}
+ was_paused = req->r_paused;
+ req->r_paused = __req_should_be_paused(osdc, req);
+ if (was_paused && !req->r_paused)
+ force_resend = 1;
+
if ((!force_resend &&
req->r_osd && req->r_osd->o_osd == o &&
req->r_sent >= req->r_osd->o_incarnation &&
req->r_num_pg_osds == num &&
memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
- (req->r_osd == NULL && o == -1))
+ (req->r_osd == NULL && o == -1) ||
+ req->r_paused)
return 0; /* no change */
dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
@@ -1613,14 +1636,17 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
*
* Caller should hold map_sem for read.
*/
-static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
+static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
+ bool force_resend_writes)
{
struct ceph_osd_request *req, *nreq;
struct rb_node *p;
int needmap = 0;
int err;
+ bool force_resend_req;
- dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
+ dout("kick_requests %s %s\n", force_resend ? " (force resend)" : "",
+ force_resend_writes ? " (force resend writes)" : "");
mutex_lock(&osdc->request_mutex);
for (p = rb_first(&osdc->requests); p; ) {
req = rb_entry(p, struct ceph_osd_request, r_node);
@@ -1645,7 +1671,10 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
continue;
}
- err = __map_request(osdc, req, force_resend);
+ force_resend_req = force_resend ||
+ (force_resend_writes &&
+ req->r_flags & CEPH_OSD_FLAG_WRITE);
+ err = __map_request(osdc, req, force_resend_req);
if (err < 0)
continue; /* error */
if (req->r_osd == NULL) {
@@ -1665,7 +1694,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
r_linger_item) {
dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
- err = __map_request(osdc, req, force_resend);
+ err = __map_request(osdc, req,
+ force_resend || force_resend_writes);
dout("__map_request returned %d\n", err);
if (err == 0)
continue; /* no change and no osd was specified */
@@ -1707,6 +1737,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
struct ceph_osdmap *newmap = NULL, *oldmap;
int err;
struct ceph_fsid fsid;
+ bool was_full;
dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
p = msg->front.iov_base;
@@ -1720,6 +1751,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
down_write(&osdc->map_sem);
+ was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+
/* incremental maps */
ceph_decode_32_safe(&p, end, nr_maps, bad);
dout(" %d inc maps\n", nr_maps);
@@ -1744,7 +1777,10 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
ceph_osdmap_destroy(osdc->osdmap);
osdc->osdmap = newmap;
}
- kick_requests(osdc, 0);
+ was_full = was_full ||
+ ceph_osdmap_flag(osdc->osdmap,
+ CEPH_OSDMAP_FULL);
+ kick_requests(osdc, 0, was_full);
} else {
dout("ignoring incremental map %u len %d\n",
epoch, maplen);
@@ -1787,7 +1823,10 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
skipped_map = 1;
ceph_osdmap_destroy(oldmap);
}
- kick_requests(osdc, skipped_map);
+ was_full = was_full ||
+ ceph_osdmap_flag(osdc->osdmap,
+ CEPH_OSDMAP_FULL);
+ kick_requests(osdc, skipped_map, was_full);
}
p += maplen;
nr_maps--;
@@ -1804,7 +1843,9 @@ done:
* we find out when we are no longer full and stop returning
* ENOSPC.
*/
- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
+ if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
+ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR))
ceph_monc_request_next_osdmap(&osdc->client->monc);
mutex_lock(&osdc->request_mutex);
@@ -2454,7 +2495,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
struct ceph_osd_client *osdc = osd->o_osdc;
struct ceph_msg *m;
struct ceph_osd_request *req;
- int front = le32_to_cpu(hdr->front_len);
+ int front_len = le32_to_cpu(hdr->front_len);
int data_len = le32_to_cpu(hdr->data_len);
u64 tid;
@@ -2474,12 +2515,13 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
req->r_reply, req->r_reply->con);
ceph_msg_revoke_incoming(req->r_reply);
- if (front > req->r_reply->front.iov_len) {
+ if (front_len > req->r_reply->front_alloc_len) {
pr_warning("get_reply front %d > preallocated %d (%u#%llu)\n",
- front, (int)req->r_reply->front.iov_len,
+ front_len, req->r_reply->front_alloc_len,
(unsigned int)con->peer_name.type,
le64_to_cpu(con->peer_name.num));
- m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
+ m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
+ false);
if (!m)
goto out;
ceph_msg_put(req->r_reply);
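A minimal userspace sketch of the pause gating these osd_client hunks introduce: a read is held back while PAUSERD is set, a write while PAUSEWR or FULL is set, and __map_request treats a paused request as "no change" until a pause-to-unpause transition forces a resend. Flag values and the request type below are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

#define MAP_PAUSERD  0x1   /* stand-ins for CEPH_OSDMAP_* */
#define MAP_PAUSEWR  0x2
#define MAP_FULL     0x4
#define REQ_READ     0x1   /* stand-ins for CEPH_OSD_FLAG_* */
#define REQ_WRITE    0x2

struct request { unsigned flags; bool paused; };

static bool req_should_be_paused(unsigned map, const struct request *req)
{
    bool pauserd = map & MAP_PAUSERD;
    bool pausewr = (map & MAP_PAUSEWR) || (map & MAP_FULL);

    return ((req->flags & REQ_READ) && pauserd) ||
           ((req->flags & REQ_WRITE) && pausewr);
}

int main(void)
{
    struct request w = { .flags = REQ_WRITE, .paused = true };
    bool was_paused = w.paused;

    w.paused = req_should_be_paused(0, &w);   /* FULL just cleared */
    if (was_paused && !w.paused)
        printf("force resend\n");             /* mirrors force_resend = 1 */
    return 0;
}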
diff --git a/net/core/dev.c b/net/core/dev.c
index ab4df3d..b327975 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -175,7 +175,6 @@ static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
-static DEFINE_MUTEX(devnet_rename_mutex);
static inline void dev_base_seq_inc(struct net *net)
{
@@ -197,14 +196,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+ spin_lock(&sd->input_pkt_queue.lock);
#endif
}
static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+ spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
@@ -827,8 +826,7 @@ retry:
strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
- mutex_lock(&devnet_rename_mutex);
- mutex_unlock(&devnet_rename_mutex);
+ cond_resched();
goto retry;
}
@@ -1094,28 +1092,30 @@ int dev_change_name(struct net_device *dev, const char *newname)
if (dev->flags & IFF_UP)
return -EBUSY;
- mutex_lock(&devnet_rename_mutex);
- __write_seqcount_begin(&devnet_rename_seq);
+ write_seqcount_begin(&devnet_rename_seq);
- if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
- goto outunlock;
+ if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
+ write_seqcount_end(&devnet_rename_seq);
+ return 0;
+ }
memcpy(oldname, dev->name, IFNAMSIZ);
err = dev_get_valid_name(net, dev, newname);
- if (err < 0)
- goto outunlock;
+ if (err < 0) {
+ write_seqcount_end(&devnet_rename_seq);
+ return err;
+ }
rollback:
ret = device_rename(&dev->dev, dev->name);
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
- err = ret;
- goto outunlock;
+ write_seqcount_end(&devnet_rename_seq);
+ return ret;
}
- __write_seqcount_end(&devnet_rename_seq);
- mutex_unlock(&devnet_rename_mutex);
+ write_seqcount_end(&devnet_rename_seq);
write_lock_bh(&dev_base_lock);
hlist_del_rcu(&dev->name_hlist);
@@ -1134,8 +1134,7 @@ rollback:
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
- mutex_lock(&devnet_rename_mutex);
- __write_seqcount_begin(&devnet_rename_seq);
+ write_seqcount_begin(&devnet_rename_seq);
memcpy(dev->name, oldname, IFNAMSIZ);
goto rollback;
} else {
@@ -1145,11 +1144,6 @@ rollback:
}
return err;
-
-outunlock:
- __write_seqcount_end(&devnet_rename_seq);
- mutex_unlock(&devnet_rename_mutex);
- return err;
}
/**
@@ -2138,7 +2132,6 @@ static inline void __netif_reschedule(struct Qdisc *q)
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
void __netif_schedule(struct Qdisc *q)
@@ -2160,7 +2153,6 @@ void dev_kfree_skb_irq(struct sk_buff *skb)
sd->completion_queue = skb;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);
@@ -3212,7 +3204,6 @@ enqueue:
rps_unlock(sd);
local_irq_restore(flags);
- preempt_check_resched_rt();
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
@@ -3250,7 +3241,7 @@ int netif_rx(struct sk_buff *skb)
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
- migrate_disable();
+ preempt_disable();
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -3260,13 +3251,13 @@ int netif_rx(struct sk_buff *skb)
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
- migrate_enable();
+ preempt_enable();
} else
#endif
{
unsigned int qtail;
- ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
- put_cpu_light();
+ ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+ put_cpu();
}
return ret;
}
@@ -3276,44 +3267,16 @@ int netif_rx_ni(struct sk_buff *skb)
{
int err;
- local_bh_disable();
+ preempt_disable();
err = netif_rx(skb);
- local_bh_enable();
+ if (local_softirq_pending())
+ do_softirq();
+ preempt_enable();
return err;
}
EXPORT_SYMBOL(netif_rx_ni);
-#ifdef CONFIG_PREEMPT_RT_FULL
-/*
- * RT runs ksoftirqd as a real time thread and the root_lock is a
- * "sleeping spinlock". If the trylock fails then we can go into an
- * infinite loop when ksoftirqd preempted the task which actually
- * holds the lock, because we requeue q and raise NET_TX softirq
- * causing ksoftirqd to loop forever.
- *
- * It's safe to use spin_lock on RT here as softirqs run in thread
- * context and cannot deadlock against the thread which is holding
- * root_lock.
- *
- * On !RT the trylock might fail, but there we bail out from the
- * softirq loop after 10 attempts which we can't do on RT. And the
- * task holding root_lock cannot be preempted, so the only downside of
- * that trylock is that we need 10 loops to decide that we should have
- * given up in the first one :)
- */
-static inline int take_root_lock(spinlock_t *lock)
-{
- spin_lock(lock);
- return 1;
-}
-#else
-static inline int take_root_lock(spinlock_t *lock)
-{
- return spin_trylock(lock);
-}
-#endif
-
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
@@ -3352,7 +3315,7 @@ static void net_tx_action(struct softirq_action *h)
head = head->next_sched;
root_lock = qdisc_lock(q);
- if (take_root_lock(root_lock)) {
+ if (spin_trylock(root_lock)) {
smp_mb__before_clear_bit();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
@@ -3743,7 +3706,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
- __skb_queue_tail(&sd->tofree_queue, skb);
+ kfree_skb(skb);
input_queue_head_incr(sd);
}
}
@@ -3752,13 +3715,10 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
- __skb_queue_tail(&sd->tofree_queue, skb);
+ kfree_skb(skb);
input_queue_head_incr(sd);
}
}
-
- if (!skb_queue_empty(&sd->tofree_queue))
- raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
static int napi_gro_complete(struct sk_buff *skb)
@@ -4115,7 +4075,6 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
} else
#endif
local_irq_enable();
- preempt_check_resched_rt();
}
static int process_backlog(struct napi_struct *napi, int quota)
@@ -4188,7 +4147,6 @@ void __napi_schedule(struct napi_struct *n)
local_irq_save(flags);
____napi_schedule(&__get_cpu_var(softnet_data), n);
local_irq_restore(flags);
- preempt_check_resched_rt();
}
EXPORT_SYMBOL(__napi_schedule);
@@ -4318,17 +4276,10 @@ static void net_rx_action(struct softirq_action *h)
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
- struct sk_buff *skb;
void *have;
local_irq_disable();
- while ((skb = __skb_dequeue(&sd->tofree_queue))) {
- local_irq_enable();
- kfree_skb(skb);
- local_irq_disable();
- }
-
while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
@@ -6447,7 +6398,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
- preempt_check_resched_rt();
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
@@ -6458,9 +6408,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
netif_rx(skb);
input_queue_head_incr(oldsd);
}
- while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
- kfree_skb(skb);
- }
return NOTIFY_OK;
}
@@ -6772,9 +6719,8 @@ static int __init net_dev_init(void)
struct softnet_data *sd = &per_cpu(softnet_data, i);
memset(sd, 0, sizeof(*sd));
- skb_queue_head_init_raw(&sd->input_pkt_queue);
- skb_queue_head_init_raw(&sd->process_queue);
- skb_queue_head_init_raw(&sd->tofree_queue);
+ skb_queue_head_init(&sd->input_pkt_queue);
+ skb_queue_head_init(&sd->process_queue);
sd->completion_queue = NULL;
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue = NULL;
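These dev.c hunks roll the -rt locking back to mainline: readers of the device name spin on devnet_rename_seq and yield with cond_resched() instead of bouncing through a rename mutex. A rough userspace analogue of that reader/writer protocol, assuming a simplified sequence counter (a real seqcount also needs memory barriers, elided here):

#include <sched.h>
#include <stdatomic.h>
#include <string.h>

static atomic_uint seq;            /* even = stable, odd = writer active */
static char name[16] = "eth0";

/* Writer: bump to odd, mutate, bump back to even. */
static void set_name(const char *new)
{
    atomic_fetch_add(&seq, 1);
    strcpy(name, new);
    atomic_fetch_add(&seq, 1);
}

/* Reader: copy, then retry (yielding) if a writer ran meanwhile. */
static void get_name(char *buf)
{
    unsigned start;
retry:
    start = atomic_load(&seq);
    strcpy(buf, name);
    if ((start & 1) || atomic_load(&seq) != start) {
        sched_yield();             /* stand-in for cond_resched() */
        goto retry;
    }
}

int main(void)
{
    char buf[16];

    set_name("eth1");
    get_name(buf);
    return buf[0] != 'e';
}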
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 462cdc9..9b40f23 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -740,7 +740,7 @@ static bool pkt_is_ns(struct sk_buff *skb)
struct nd_msg *msg;
struct ipv6hdr *hdr;
- if (skb->protocol != htons(ETH_P_ARP))
+ if (skb->protocol != htons(ETH_P_IPV6))
return false;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
return false;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2a0e21d..37b492e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2014,12 +2014,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
struct net_device *dev,
u8 *addr, u32 pid, u32 seq,
- int type, unsigned int flags)
+ int type, unsigned int flags,
+ int nlflags)
{
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
if (!nlh)
return -EMSGSIZE;
@@ -2057,7 +2058,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
if (!skb)
goto errout;
- err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
+ err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
if (err < 0) {
kfree_skb(skb);
goto errout;
@@ -2282,7 +2283,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
portid, seq,
- RTM_NEWNEIGH, NTF_SELF);
+ RTM_NEWNEIGH, NTF_SELF,
+ NLM_F_MULTI);
if (err < 0)
return err;
skip:
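The nlflags argument threaded through nlmsg_populate_fdb_fill encodes one distinction: dump replies are multi-part messages (terminated by NLMSG_DONE), so they carry NLM_F_MULTI, while single event notifications pass 0. A one-function sketch of that call-site convention; the flag value is copied from <linux/netlink.h> rather than included:

#include <stdint.h>

#define NLM_F_MULTI 2   /* value from <linux/netlink.h> */

/* Dumps are part of a multi-part stream; notifications stand alone. */
static uint16_t fdb_fill_nlflags(int is_dump)
{
    return is_dump ? NLM_F_MULTI : 0;
}

int main(void)
{
    return fdb_fill_nlflags(1) == NLM_F_MULTI ? 0 : 1;
}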
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da24627..21571dc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -62,7 +62,6 @@
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
-#include <linux/locallock.h>
#include <net/protocol.h>
#include <net/dst.h>
@@ -335,7 +334,6 @@ struct netdev_alloc_cache {
unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
-static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
@@ -344,7 +342,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
int order;
unsigned long flags;
- local_lock_irqsave(netdev_alloc_lock, flags);
+ local_irq_save(flags);
nc = &__get_cpu_var(netdev_alloc_cache);
if (unlikely(!nc->frag.page)) {
refill:
@@ -378,7 +376,7 @@ recycle:
nc->frag.offset += fragsz;
nc->pagecnt_bias--;
end:
- local_unlock_irqrestore(netdev_alloc_lock, flags);
+ local_irq_restore(flags);
return data;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index e57770c..ec228a3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2339,11 +2339,12 @@ void lock_sock_nested(struct sock *sk, int subclass)
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
- spin_unlock_bh(&sk->sk_lock.slock);
+ spin_unlock(&sk->sk_lock.slock);
/*
* The sk_lock has mutex_lock() semantics here:
*/
mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+ local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);
@@ -2358,10 +2359,13 @@ void release_sock(struct sock *sk)
if (sk->sk_backlog.tail)
__release_sock(sk);
+ /* Warning : release_cb() might need to release sk ownership,
+ * ie call sock_release_ownership(sk) before us.
+ */
if (sk->sk_prot->release_cb)
sk->sk_prot->release_cb(sk);
- sk->sk_lock.owned = 0;
+ sock_release_ownership(sk);
if (waitqueue_active(&sk->sk_lock.wq))
wake_up(&sk->sk_lock.wq);
spin_unlock_bh(&sk->sk_lock.slock);
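Both sock.c hunks protect one invariant: ownership of sk_lock may already have been dropped by release_cb (the tcp_output.c hunk further down does exactly that), so release_sock must go through sock_release_ownership() rather than blindly storing 0. A toy sketch of that idempotent handoff, with an illustrative lock structure:

#include <stdio.h>

struct toy_sock {
    int owned;                          /* stand-in for sk_lock.owned */
    void (*release_cb)(struct toy_sock *);
};

static void release_ownership(struct toy_sock *sk)
{
    if (sk->owned)
        sk->owned = 0;                  /* safe to call twice */
}

/* Deferred-work callback that needs BH-style semantics, so it gives
 * up socket ownership early, as tcp_release_cb does. */
static void toy_release_cb(struct toy_sock *sk)
{
    release_ownership(sk);
    /* ... process deferred work ... */
}

static void toy_release_sock(struct toy_sock *sk)
{
    if (sk->release_cb)
        sk->release_cb(sk);             /* may drop ownership itself */
    release_ownership(sk);              /* no-op if the callback did */
}

int main(void)
{
    struct toy_sock sk = { .owned = 1, .release_cb = toy_release_cb };

    toy_release_sock(&sk);
    printf("owned=%d\n", sk.owned);
    return sk.owned;
}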
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 736c9fc3..0c0c1f0 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -211,6 +211,14 @@ static int gre_cisco_rcv(struct sk_buff *skb)
int i;
bool csum_err = false;
+#ifdef CONFIG_NET_IPGRE_BROADCAST
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
+ /* Looped back packet, drop it! */
+ if (rt_is_output_route(skb_rtable(skb)))
+ goto drop;
+ }
+#endif
+
if (parse_gre_header(skb, &tpi, &csum_err) < 0)
goto drop;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 1f0c7e0..5f7d11a 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -69,7 +69,6 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
-#include <linux/sysrq.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
@@ -777,30 +776,6 @@ static void icmp_redirect(struct sk_buff *skb)
}
/*
- * 32bit and 64bit have different timestamp length, so we check for
- * the cookie at offset 20 and verify it is repeated at offset 50
- */
-#define CO_POS0 20
-#define CO_POS1 50
-#define CO_SIZE sizeof(int)
-#define ICMP_SYSRQ_SIZE 57
-
-/*
- * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie
- * pattern and if it matches send the next byte as a trigger to sysrq.
- */
-static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
-{
- int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
- char *p = skb->data;
-
- if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
- !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
- p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
- handle_sysrq(p[CO_POS0 + CO_SIZE]);
-}
-
-/*
* Handle ICMP_ECHO ("ping") requests.
*
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
@@ -827,11 +802,6 @@ static void icmp_echo(struct sk_buff *skb)
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
-
- if (skb->len == ICMP_SYSRQ_SIZE &&
- net->ipv4.sysctl_icmp_echo_sysrq) {
- icmp_check_sysrq(net, skb);
- }
}
}
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index c5313a9..12b80fb 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -211,7 +211,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
}
work = frag_mem_limit(nf) - nf->low_thresh;
- while (work > 0) {
+ while (work > 0 || force) {
spin_lock(&nf->lru_lock);
if (list_empty(&nf->lru_list)) {
@@ -281,9 +281,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
atomic_inc(&qp->refcnt);
hlist_add_head(&qp->list, &hb->chain);
+ inet_frag_lru_add(nf, qp);
spin_unlock(&hb->chain_lock);
read_unlock(&f->lock);
- inet_frag_lru_add(nf, qp);
+
return qp;
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8bb3b4a..3982eab 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -79,7 +79,6 @@
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
-#include <linux/locallock.h>
int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);
@@ -1469,9 +1468,6 @@ static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
.uc_ttl = -1,
};
-/* serialize concurrent calls on the same CPU to ip_send_unicast_reply */
-static DEFINE_LOCAL_IRQ_LOCK(unicast_lock);
-
void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
__be32 saddr, const struct ip_reply_arg *arg,
unsigned int len)
@@ -1509,7 +1505,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
if (IS_ERR(rt))
return;
- inet = &get_locked_var(unicast_lock, unicast_sock);
+ inet = &get_cpu_var(unicast_sock);
inet->tos = arg->tos;
sk = &inet->sk;
@@ -1533,7 +1529,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
ip_push_pending_frames(sk, &fl4);
}
- put_locked_var(unicast_lock, unicast_sock);
+ put_cpu_var(unicast_sock);
ip_rt_put(rt);
}
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 995a0bb..3bedb26 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -411,9 +411,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
- /* Looped back packet, drop it! */
- if (rt_is_output_route(skb_rtable(skb)))
- goto drop;
tunnel->dev->stats.multicast++;
skb->pkt_type = PACKET_BROADCAST;
}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index ba22cc3..c31e3ad 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -109,6 +109,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
secpath_reset(skb);
if (!skb->l4_rxhash)
skb->rxhash = 0;
+ skb_dst_drop(skb);
skb->vlan_tci = 0;
skb_set_queue_mapping(skb, 0);
skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 1672409..6fbf339 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2253,13 +2253,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
- u32 portid, u32 seq, struct mfc_cache *c, int cmd)
+ u32 portid, u32 seq, struct mfc_cache *c, int cmd,
+ int flags)
{
struct nlmsghdr *nlh;
struct rtmsg *rtm;
int err;
- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
if (nlh == NULL)
return -EMSGSIZE;
@@ -2327,7 +2328,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
if (skb == NULL)
goto errout;
- err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
+ err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
if (err < 0)
goto errout;
@@ -2366,7 +2367,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
if (ipmr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0)
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0)
goto done;
next_entry:
e++;
@@ -2380,7 +2382,8 @@ next_entry:
if (ipmr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0) {
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0) {
spin_unlock_bh(&mfc_unres_lock);
goto done;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c482f7c..c211607 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -249,26 +249,33 @@ int ping_init_sock(struct sock *sk)
{
struct net *net = sock_net(sk);
kgid_t group = current_egid();
- struct group_info *group_info = get_current_groups();
- int i, j, count = group_info->ngroups;
+ struct group_info *group_info;
+ int i, j, count;
kgid_t low, high;
+ int ret = 0;
inet_get_ping_group_range_net(net, &low, &high);
if (gid_lte(low, group) && gid_lte(group, high))
return 0;
+ group_info = get_current_groups();
+ count = group_info->ngroups;
for (i = 0; i < group_info->nblocks; i++) {
int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
for (j = 0; j < cp_count; j++) {
kgid_t gid = group_info->blocks[i][j];
if (gid_lte(low, gid) && gid_lte(gid, high))
- return 0;
+ goto out_release_group;
}
count -= cp_count;
}
- return -EACCES;
+ ret = -EACCES;
+
+out_release_group:
+ put_group_info(group_info);
+ return ret;
}
EXPORT_SYMBOL_GPL(ping_init_sock);
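The ping_init_sock change is reference-count discipline: get_current_groups() takes a reference on the group list, so the fix acquires it only after the fast path and funnels every exit through one label that drops it. A compact sketch of that acquire-late/release-once shape (the refcounted type is illustrative):

#include <errno.h>

struct groups { int refcount; int n; const int *gids; };

static struct groups *get_groups(struct groups *g) { g->refcount++; return g; }
static void put_groups(struct groups *g) { g->refcount--; }

static int check_access(struct groups *g, int low, int high, int egid)
{
    int i, ret = 0;

    if (egid >= low && egid <= high)
        return 0;                   /* fast path: no reference taken */

    g = get_groups(g);              /* reference held from here on */
    for (i = 0; i < g->n; i++)
        if (g->gids[i] >= low && g->gids[i] <= high)
            goto out_release;       /* ret stays 0 */
    ret = -EACCES;
out_release:
    put_groups(g);                  /* every exit path drops it */
    return ret;
}

int main(void)
{
    const int gids[] = { 7, 42 };
    struct groups g = { .refcount = 1, .n = 2, .gids = gids };

    return check_access(&g, 40, 50, 99);   /* 42 matches: returns 0 */
}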
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 99461ed..540279f 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -812,13 +812,6 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec
},
{
- .procname = "icmp_echo_sysrq",
- .data = &init_net.ipv4.sysctl_icmp_echo_sysrq,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
.procname = "icmp_ignore_bogus_error_responses",
.data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
.maxlen = sizeof(int),
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e088932..826fc6f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -765,6 +765,17 @@ void tcp_release_cb(struct sock *sk)
if (flags & (1UL << TCP_TSQ_DEFERRED))
tcp_tsq_handler(sk);
+ /* Here begins the tricky part :
+ * We are called from release_sock() with :
+ * 1) BH disabled
+ * 2) sk_lock.slock spinlock held
+ * 3) socket owned by us (sk->sk_lock.owned == 1)
+ *
+ * But following code is meant to be called from BH handlers,
+ * so we should keep BH disabled, but early release socket ownership
+ */
+ sock_release_ownership(sk);
+
if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
tcp_write_timer_handler(sk);
__sock_put(sk);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cd3fb30..5dac9fd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1079,8 +1079,11 @@ retry:
* Lifetime is greater than REGEN_ADVANCE time units. In particular,
* an implementation must not create a temporary address with a zero
* Preferred Lifetime.
+ * Use age calculation as in addrconf_verify to avoid unnecessary
+ * temporary addresses being generated.
*/
- if (tmp_prefered_lft <= regen_advance) {
+ age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
+ if (tmp_prefered_lft <= regen_advance + age) {
in6_ifa_put(ifp);
in6_dev_put(idev);
ret = -1;
diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
index cf77f3a..447a7fb 100644
--- a/net/ipv6/exthdrs_offload.c
+++ b/net/ipv6/exthdrs_offload.c
@@ -25,11 +25,11 @@ int __init ipv6_exthdrs_offload_init(void)
int ret;
ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING);
- if (!ret)
+ if (ret)
goto out;
ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS);
- if (!ret)
+ if (ret)
goto out_rt;
out:
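The exthdrs_offload fix inverts two error checks: inet6_add_offload() returns 0 on success, so the old "if (!ret) goto out" bailed out precisely when registration had succeeded, and a failure of the second registration never unwound the first. A small stub-based sketch of the corrected 0-on-success unwind:

#include <stdio.h>

/* Stubs standing in for inet6_add_offload(); 0 means success. */
static int add_offload(int which) { return which == 1 ? -1 : 0; }
static void del_offload(int which) { printf("undo %d\n", which); }

static int offload_init(void)
{
    int ret;

    ret = add_offload(0);
    if (ret)                    /* was: if (!ret) -- bailed on success */
        goto out;
    ret = add_offload(1);
    if (ret)
        goto out_undo_first;
    return 0;

out_undo_first:
    del_offload(0);             /* unwind in reverse order */
out:
    return ret;
}

int main(void)
{
    return offload_init() ? 1 : 0;
}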
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index eef8d94..e2c9ff8 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -516,7 +516,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
np->tclass, NULL, &fl6, (struct rt6_info *)dst,
MSG_DONTWAIT, np->dontfrag);
if (err) {
- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
+ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 68fd491..516e136 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1088,21 +1088,19 @@ static void ip6_append_data_mtu(unsigned int *mtu,
unsigned int fragheaderlen,
struct sk_buff *skb,
struct rt6_info *rt,
- bool pmtuprobe)
+ unsigned int orig_mtu)
{
if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
if (skb == NULL) {
/* first fragment, reserve header_len */
- *mtu = *mtu - rt->dst.header_len;
+ *mtu = orig_mtu - rt->dst.header_len;
} else {
/*
* this fragment is not first, the headers
* space is regarded as data space.
*/
- *mtu = min(*mtu, pmtuprobe ?
- rt->dst.dev->mtu :
- dst_mtu(rt->dst.path));
+ *mtu = orig_mtu;
}
*maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ fragheaderlen - sizeof(struct frag_hdr);
@@ -1119,7 +1117,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_cork *cork;
struct sk_buff *skb, *skb_prev = NULL;
- unsigned int maxfraglen, fragheaderlen, mtu;
+ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
int exthdrlen;
int dst_exthdrlen;
int hh_len;
@@ -1201,6 +1199,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
dst_exthdrlen = 0;
mtu = cork->fragsize;
}
+ orig_mtu = mtu;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -1280,8 +1279,7 @@ alloc_new_skb:
if (skb == NULL || skb_prev == NULL)
ip6_append_data_mtu(&mtu, &maxfraglen,
fragheaderlen, skb, rt,
- np->pmtudisc ==
- IPV6_PMTUDISC_PROBE);
+ orig_mtu);
skb_prev = skb;
@@ -1537,8 +1535,8 @@ int ip6_push_pending_frames(struct sock *sk)
if (proto == IPPROTO_ICMPV6) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
- ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+ ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
+ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
}
err = ip6_local_out(skb);
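The ip6_output change caches the MTU before the first fragment reduces it, so later fragments derive maxfraglen from orig_mtu instead of an already-shrunk value. The arithmetic from the hunk above: payload after the IPv6 header is rounded down to a multiple of 8, then the 8-byte fragment header is carved out. A worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
    unsigned int mtu = 1500;            /* orig_mtu for non-first frags */
    unsigned int fragheaderlen = 40;    /* IPv6 header */
    unsigned int frag_hdr_len = 8;      /* sizeof(struct frag_hdr) */
    unsigned int maxfraglen;

    /* ((mtu - fragheaderlen) & ~7): fragmentable payload, 8-aligned;
     * adding fragheaderlen back and subtracting the fragment header
     * yields the largest skb length a fragment may reach. */
    maxfraglen = ((mtu - fragheaderlen) & ~7u)
                 + fragheaderlen - frag_hdr_len;

    printf("maxfraglen = %u\n", maxfraglen);   /* 1456 + 40 - 8 = 1488 */
    return 0;
}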
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 0eb4038..8737400 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2349,13 +2349,14 @@ int ip6mr_get_route(struct net *net,
}
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
- u32 portid, u32 seq, struct mfc6_cache *c, int cmd)
+ u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
+ int flags)
{
struct nlmsghdr *nlh;
struct rtmsg *rtm;
int err;
- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
if (nlh == NULL)
return -EMSGSIZE;
@@ -2423,7 +2424,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
if (skb == NULL)
goto errout;
- err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
+ err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
if (err < 0)
goto errout;
@@ -2462,7 +2463,8 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
if (ip6mr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0)
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0)
goto done;
next_entry:
e++;
@@ -2476,7 +2478,8 @@ next_entry:
if (ip6mr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0) {
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0) {
spin_unlock_bh(&mfc_unres_lock);
goto done;
}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index d18f9f9..d81abd5 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1620,11 +1620,12 @@ static void mld_sendpack(struct sk_buff *skb)
dst_output);
out:
if (!err) {
- ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
- IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
- } else
- IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
+ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+ IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
+ } else {
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ }
rcu_read_unlock();
return;
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 7856e96..6acab0b 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -182,8 +182,8 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
MSG_DONTWAIT, np->dontfrag);
if (err) {
- ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
- ICMP6_MIB_OUTERRORS);
+ ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
+ ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
err = icmpv6_push_pending_frames(sk, &fl6,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0accb13..77f81be 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1500,7 +1500,7 @@ int ip6_route_add(struct fib6_config *cfg)
if (!table)
goto out;
- rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
+ rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
if (!rt) {
err = -ENOMEM;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 62fba17..1e5bd0d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3298,7 +3298,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- WARN_ON_ONCE_NONRT(softirq_count() == 0);
+ WARN_ON_ONCE(softirq_count() == 0);
if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
goto drop;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 6bd22aa..593b16e 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -21,17 +21,11 @@
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/locallock.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include "nf_internals.h"
-#ifdef CONFIG_PREEMPT_RT_BASE
-DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
-EXPORT_PER_CPU_SYMBOL(xt_write_lock);
-#endif
-
static DEFINE_MUTEX(afinfo_mutex);
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 4c8e5c0..d585626 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -871,11 +871,11 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
cp->protocol = p->protocol;
ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
cp->cport = p->cport;
- ip_vs_addr_set(p->af, &cp->vaddr, p->vaddr);
- cp->vport = p->vport;
- /* proto should only be IPPROTO_IP if d_addr is a fwmark */
+ /* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
- &cp->daddr, daddr);
+ &cp->vaddr, p->vaddr);
+ cp->vport = p->vport;
+ ip_vs_addr_set(p->af, &cp->daddr, daddr);
cp->dport = dport;
cp->flags = flags;
cp->fwmark = fwmark;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index a99b6c3..59359be 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
const char *msg;
u_int8_t state;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
u_int8_t type, old_state, new_state;
enum ct_dccp_roles role;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
BUG_ON(dh == NULL);
type = dh->dccph_type;
@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
unsigned int cscov;
const char *msg;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
if (dh == NULL) {
msg = "nf_ct_dccp: short packet ";
goto out_invalid;
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index c323567..5c2dab2 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -65,8 +65,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
continue;
netdev_vport = netdev_vport_priv(vport);
- if (netdev_vport->dev->reg_state == NETREG_UNREGISTERED ||
- netdev_vport->dev->reg_state == NETREG_UNREGISTERING)
+ if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
dp_detach_port_notify(vport);
}
}
@@ -88,6 +87,10 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
return NOTIFY_DONE;
if (event == NETDEV_UNREGISTER) {
+ /* upper_dev_unlink and decrement promisc immediately */
+ ovs_netdev_detach_dev(vport);
+
+ /* schedule vport destroy, dev_put and genl notification */
ovs_net = net_generic(dev_net(dev), ovs_net_id);
queue_work(system_wq, &ovs_net->dp_notify_work);
}
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 09d93c1..d21f77d 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -150,15 +150,25 @@ static void free_port_rcu(struct rcu_head *rcu)
ovs_vport_free(vport_from_priv(netdev_vport));
}
-static void netdev_destroy(struct vport *vport)
+void ovs_netdev_detach_dev(struct vport *vport)
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- rtnl_lock();
+ ASSERT_RTNL();
netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
netdev_rx_handler_unregister(netdev_vport->dev);
- netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
+ netdev_upper_dev_unlink(netdev_vport->dev,
+ netdev_master_upper_dev_get(netdev_vport->dev));
dev_set_promiscuity(netdev_vport->dev, -1);
+}
+
+static void netdev_destroy(struct vport *vport)
+{
+ struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+ rtnl_lock();
+ if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
+ ovs_netdev_detach_dev(vport);
rtnl_unlock();
call_rcu(&netdev_vport->rcu, free_port_rcu);
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index dd298b5..8df01c11 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -39,5 +39,6 @@ netdev_vport_priv(const struct vport *vport)
}
const char *ovs_netdev_get_name(const struct vport *);
+void ovs_netdev_detach_dev(struct vport *);
#endif /* vport_netdev.h */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d1705d0..88cfbc1 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -63,7 +63,6 @@
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
-#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -631,7 +630,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
- cpu_chill();
+ cpu_relax();
}
}
@@ -882,7 +881,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
- cpu_chill();
+ cpu_relax();
}
}
prb_close_block(pkc, pbd, po, status);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 5a44c6e..e8fdb17 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -34,7 +34,6 @@
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>
-#include <linux/delay.h>
#include "rds.h"
#include "ib.h"
@@ -287,7 +286,7 @@ static inline void wait_clean_list_grace(void)
for_each_online_cpu(cpu) {
flag = &per_cpu(clean_list_grace, cpu);
while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
- cpu_chill();
+ cpu_relax();
}
}
diff --git a/net/rds/iw.c b/net/rds/iw.c
index 7826d46..5899356 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -239,7 +239,8 @@ static int rds_iw_laddr_check(__be32 addr)
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support IB devices unless we
check node_type. */
- if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
+ if (ret || !cm_id->device ||
+ cm_id->device->node_type != RDMA_NODE_RNIC)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 2e55f81..52229f9 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -577,9 +577,11 @@ static void fq_rehash(struct fq_sched_data *q,
q->stat_gc_flows += fcnt;
}
-static int fq_resize(struct fq_sched_data *q, u32 log)
+static int fq_resize(struct Qdisc *sch, u32 log)
{
+ struct fq_sched_data *q = qdisc_priv(sch);
struct rb_root *array;
+ void *old_fq_root;
u32 idx;
if (q->fq_root && log == q->fq_trees_log)
@@ -592,13 +594,19 @@ static int fq_resize(struct fq_sched_data *q, u32 log)
for (idx = 0; idx < (1U << log); idx++)
array[idx] = RB_ROOT;
- if (q->fq_root) {
- fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
- kfree(q->fq_root);
- }
+ sch_tree_lock(sch);
+
+ old_fq_root = q->fq_root;
+ if (old_fq_root)
+ fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
+
q->fq_root = array;
q->fq_trees_log = log;
+ sch_tree_unlock(sch);
+
+ kfree(old_fq_root);
+
return 0;
}
@@ -674,9 +682,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
}
- if (!err)
- err = fq_resize(q, fq_log);
-
+ if (!err) {
+ sch_tree_unlock(sch);
+ err = fq_resize(sch, fq_log);
+ sch_tree_lock(sch);
+ }
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_dequeue(sch);
@@ -722,7 +732,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
if (opt)
err = fq_change(sch, opt);
else
- err = fq_resize(q, q->fq_trees_log);
+ err = fq_resize(sch, q->fq_trees_log);
return err;
}
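The sch_fq fix shrinks the critical section to the pointer swap: the new hash array is allocated with no lock held, published (after rehashing) under sch_tree_lock, and the old array is freed only after unlocking. A generic userspace sketch of that swap-under-lock, free-outside pattern:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static void **table;
static unsigned int table_log;

static int resize(unsigned int log)
{
    void **new_tbl, **old_tbl;

    new_tbl = calloc(1UL << log, sizeof(*new_tbl));  /* unlocked alloc */
    if (!new_tbl)
        return -1;

    pthread_mutex_lock(&tree_lock);
    old_tbl = table;
    /* rehash(old_tbl, table_log, new_tbl, log) would run here */
    table = new_tbl;                    /* publish under the lock */
    table_log = log;
    pthread_mutex_unlock(&tree_lock);

    free(old_tbl);                      /* defer the free past unlock */
    return 0;
}

int main(void)
{
    return resize(4) || resize(5);
}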
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 1dcebc9..a74e278 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -850,7 +850,7 @@ void dev_deactivate_many(struct list_head *head)
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, unreg_list)
while (some_qdisc_is_busy(dev))
- msleep(1);
+ yield();
}
void dev_deactivate(struct net_device *dev)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index d244a23..26be077 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1433,8 +1433,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
BUG_ON(!list_empty(&chunk->list));
list_del_init(&chunk->transmitted_list);
- /* Free the chunk skb data and the SCTP_chunk stub itself. */
- dev_kfree_skb(chunk->skb);
+ consume_skb(chunk->skb);
+ consume_skb(chunk->auth_chunk);
SCTP_DBG_OBJCNT_DEC(chunk);
kmem_cache_free(sctp_chunk_cachep, chunk);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 56ebe71..0a5f050 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -761,7 +761,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
/* Make sure that we and the peer are AUTH capable */
if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
- kfree_skb(chunk->auth_chunk);
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
@@ -776,10 +775,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
auth.transport = chunk->transport;
ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
-
- /* We can now safely free the auth_chunk clone */
- kfree_skb(chunk->auth_chunk);
-
if (ret != SCTP_IERROR_NO_ERROR) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
diff --git a/net/socket.c b/net/socket.c
index e83c416..dc57dae 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1972,6 +1972,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
{
if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
return -EFAULT;
+
+ if (kmsg->msg_namelen < 0)
+ return -EINVAL;
+
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
return 0;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index a7f9821..bb035f8 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1508,7 +1508,7 @@ out:
static int
gss_refresh_null(struct rpc_task *task)
{
- return -EACCES;
+ return 0;
}
static __be32 *
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 890a299..e860d4f 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -64,7 +64,6 @@ static void xprt_free_allocation(struct rpc_rqst *req)
free_page((unsigned long)xbufp->head[0].iov_base);
xbufp = &req->rq_snd_buf;
free_page((unsigned long)xbufp->head[0].iov_base);
- list_del(&req->rq_bc_pa_list);
kfree(req);
}
@@ -168,8 +167,10 @@ out_free:
/*
* Memory allocation failed, free the temporary list
*/
- list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
+ list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
+ list_del(&req->rq_bc_pa_list);
xprt_free_allocation(req);
+ }
dprintk("RPC: setup backchannel transport failed\n");
return -ENOMEM;
@@ -198,6 +199,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
xprt_dec_alloc_count(xprt, max_reqs);
list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
dprintk("RPC: req=%p\n", req);
+ list_del(&req->rq_bc_pa_list);
xprt_free_allocation(req);
if (--max_reqs == 0)
break;
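Both backchannel hunks enforce the same ownership rule: the walker of a list unlinks each entry itself, and xprt_free_allocation() no longer touches rq_bc_pa_list, so a list_for_each_entry_safe pass never reads linkage from freed memory. A minimal sketch with the intrusive list reduced to a bare doubly linked node:

#include <stdlib.h>

struct node { struct node *prev, *next; };

static void list_del(struct node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

struct req { struct node link; /* ... buffers ... */ };

static void free_req(struct req *r)
{
    free(r);                    /* no list_del: the walker unlinks */
}

static void destroy_all(struct node *head)
{
    struct node *pos = head->next, *tmp;

    while (pos != head) {       /* _safe walk: cache next first */
        tmp = pos->next;
        list_del(pos);          /* unlink, then free */
        free_req((struct req *)pos);
        pos = tmp;
    }
}

int main(void)
{
    struct node head = { &head, &head };
    struct req *r = malloc(sizeof(*r));

    r->link.prev = r->link.next = &head;
    head.next = head.prev = &r->link;
    destroy_all(&head);
    return 0;
}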
diff --git a/net/tipc/config.c b/net/tipc/config.c
index c301a9a..5afe633 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -376,7 +376,6 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
struct tipc_cfg_msg_hdr *req_hdr;
struct tipc_cfg_msg_hdr *rep_hdr;
struct sk_buff *rep_buf;
- int ret;
/* Validate configuration message header (ignore invalid message) */
req_hdr = (struct tipc_cfg_msg_hdr *)buf;
@@ -398,12 +397,8 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
rep_hdr->tcm_len = htonl(rep_buf->len);
rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
-
- ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
- rep_buf->len);
- if (ret < 0)
- pr_err("Sending cfg reply message failed, no memory\n");
-
+ tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
+ rep_buf->len);
kfree_skb(rep_buf);
}
}
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index b36f0fc..79b991e 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -57,7 +57,6 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
struct queue_item *item;
if (!handler_enabled) {
- pr_err("Signal request ignored by handler\n");
return -ENOPROTOOPT;
}
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 09dcd54..299e45af 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -942,20 +942,51 @@ int tipc_nametbl_init(void)
return 0;
}
+/**
+ * tipc_purge_publications - remove all publications for a given type
+ *
+ * tipc_nametbl_lock must be held when calling this function
+ */
+static void tipc_purge_publications(struct name_seq *seq)
+{
+ struct publication *publ, *safe;
+ struct sub_seq *sseq;
+ struct name_info *info;
+
+ if (!seq->sseqs) {
+ nameseq_delete_empty(seq);
+ return;
+ }
+ sseq = seq->sseqs;
+ info = sseq->info;
+ list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
+ tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
+ publ->ref, publ->key);
+ }
+}
+
void tipc_nametbl_stop(void)
{
u32 i;
+ struct name_seq *seq;
+ struct hlist_head *seq_head;
+ struct hlist_node *safe;
if (!table.types)
return;
- /* Verify name table is empty, then release it */
+ /* Verify name table is empty and purge any lingering
+ * publications, then release the name table
+ */
write_lock_bh(&tipc_nametbl_lock);
for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
if (hlist_empty(&table.types[i]))
continue;
- pr_err("nametbl_stop(): orphaned hash chain detected\n");
- break;
+ seq_head = &table.types[i];
+ hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
+ tipc_purge_publications(seq);
+ }
+ continue;
}
kfree(table.types);
table.types = NULL;
diff --git a/net/tipc/server.c b/net/tipc/server.c
index fd3fa57..bd2336a 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -87,7 +87,6 @@ static void tipc_clean_outqueues(struct tipc_conn *con);
static void tipc_conn_kref_release(struct kref *kref)
{
struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
- struct tipc_server *s = con->server;
if (con->sock) {
tipc_sock_release_local(con->sock);
@@ -95,10 +94,6 @@ static void tipc_conn_kref_release(struct kref *kref)
}
tipc_clean_outqueues(con);
-
- if (con->conid)
- s->tipc_conn_shutdown(con->conid, con->usr_data);
-
kfree(con);
}
@@ -181,6 +176,9 @@ static void tipc_close_conn(struct tipc_conn *con)
struct tipc_server *s = con->server;
if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+ if (con->conid)
+ s->tipc_conn_shutdown(con->conid, con->usr_data);
+
spin_lock_bh(&s->idr_lock);
idr_remove(&s->conn_idr, con->conid);
s->idr_in_use--;
@@ -429,10 +427,12 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
list_add_tail(&e->list, &con->outqueue);
spin_unlock_bh(&con->outqueue_lock);
- if (test_bit(CF_CONNECTED, &con->flags))
+ if (test_bit(CF_CONNECTED, &con->flags)) {
if (!queue_work(s->send_wq, &con->swork))
conn_put(con);
-
+ } else {
+ conn_put(con);
+ }
return 0;
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index d38bb45..c2a37aa 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -96,20 +96,16 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
{
struct tipc_subscriber *subscriber = sub->subscriber;
struct kvec msg_sect;
- int ret;
msg_sect.iov_base = (void *)&sub->evt;
msg_sect.iov_len = sizeof(struct tipc_event);
-
sub->evt.event = htohl(event, sub->swap);
sub->evt.found_lower = htohl(found_lower, sub->swap);
sub->evt.found_upper = htohl(found_upper, sub->swap);
sub->evt.port.ref = htohl(port_ref, sub->swap);
sub->evt.port.node = htohl(node, sub->swap);
- ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL,
- msg_sect.iov_base, msg_sect.iov_len);
- if (ret < 0)
- pr_err("Sending subscription event failed, no memory\n");
+ tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
+ msg_sect.iov_len);
}
/**
@@ -153,14 +149,6 @@ static void subscr_timeout(struct tipc_subscription *sub)
/* The spin lock per subscriber is used to protect its members */
spin_lock_bh(&subscriber->lock);
- /* Validate if the connection related to the subscriber is
- * closed (in case subscriber is terminating)
- */
- if (subscriber->conid == 0) {
- spin_unlock_bh(&subscriber->lock);
- return;
- }
-
/* Validate timeout (in case subscription is being cancelled) */
if (sub->timeout == TIPC_WAIT_FOREVER) {
spin_unlock_bh(&subscriber->lock);
@@ -215,9 +203,6 @@ static void subscr_release(struct tipc_subscriber *subscriber)
spin_lock_bh(&subscriber->lock);
- /* Invalidate subscriber reference */
- subscriber->conid = 0;
-
/* Destroy any existing subscriptions for subscriber */
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
@@ -278,9 +263,9 @@ static void subscr_cancel(struct tipc_subscr *s,
*
* Called with subscriber lock held.
*/
-static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
- struct tipc_subscriber *subscriber)
-{
+static int subscr_subscribe(struct tipc_subscr *s,
+ struct tipc_subscriber *subscriber,
+ struct tipc_subscription **sub_p) {
struct tipc_subscription *sub;
int swap;
@@ -291,23 +276,21 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
subscr_cancel(s, subscriber);
- return NULL;
+ return 0;
}
/* Refuse subscription if global limit exceeded */
if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
pr_warn("Subscription rejected, limit reached (%u)\n",
TIPC_MAX_SUBSCRIPTIONS);
- subscr_terminate(subscriber);
- return NULL;
+ return -EINVAL;
}
/* Allocate subscription object */
sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
if (!sub) {
pr_warn("Subscription rejected, no memory\n");
- subscr_terminate(subscriber);
- return NULL;
+ return -ENOMEM;
}
/* Initialize subscription object */
@@ -321,8 +304,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
(sub->seq.lower > sub->seq.upper)) {
pr_warn("Subscription rejected, illegal request\n");
kfree(sub);
- subscr_terminate(subscriber);
- return NULL;
+ return -EINVAL;
}
INIT_LIST_HEAD(&sub->nameseq_list);
list_add(&sub->subscription_list, &subscriber->subscription_list);
@@ -335,8 +317,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
(Handler)subscr_timeout, (unsigned long)sub);
k_start_timer(&sub->timer, sub->timeout);
}
-
- return sub;
+ *sub_p = sub;
+ return 0;
}
/* Handle one termination request for the subscriber */
@@ -350,10 +332,14 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
void *usr_data, void *buf, size_t len)
{
struct tipc_subscriber *subscriber = usr_data;
- struct tipc_subscription *sub;
+ struct tipc_subscription *sub = NULL;
spin_lock_bh(&subscriber->lock);
- sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber);
+ if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
+ spin_unlock_bh(&subscriber->lock);
+ subscr_terminate(subscriber);
+ return;
+ }
if (sub)
tipc_nametbl_subscribe(sub);
spin_unlock_bh(&subscriber->lock);
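The subscr_subscribe rework separates "failed" from "nothing created": errors come back as the return value, the new subscription through an out parameter, and only the caller decides to terminate the subscriber, after dropping its lock. A sketch of that out-parameter convention with toy types:

#include <errno.h>
#include <stdlib.h>

struct sub { int id; };

/* 0 on success or on a handled cancel (*out may stay NULL);
 * negative errno on failure. Never tears the caller down itself. */
static int subscribe(int cancel, struct sub **out)
{
    struct sub *s;

    if (cancel)
        return 0;               /* handled, nothing created */
    s = malloc(sizeof(*s));
    if (!s)
        return -ENOMEM;
    s->id = 1;
    *out = s;
    return 0;
}

static void handle(int cancel)
{
    struct sub *s = NULL;

    /* lock(subscriber) */
    if (subscribe(cancel, &s) < 0) {
        /* unlock(subscriber); then terminate(subscriber):
         * the caller's decision, made with the lock dropped */
        return;
    }
    if (s)
        free(s);                /* stands in for tipc_nametbl_subscribe() */
    /* unlock(subscriber) */
}

int main(void)
{
    handle(0);
    handle(1);
    return 0;
}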
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d7c1ac6..c3975bc 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1785,8 +1785,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
err = mutex_lock_interruptible(&u->readlock);
- if (err) {
- err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+ if (unlikely(err)) {
+ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
+ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+ */
+ err = noblock ? -EAGAIN : -ERESTARTSYS;
goto out;
}
@@ -1911,6 +1914,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = msg->msg_name;
int copied = 0;
+ int noblock = flags & MSG_DONTWAIT;
int check_creds = 0;
int target;
int err = 0;
@@ -1926,7 +1930,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
- timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+ timeo = sock_rcvtimeo(sk, noblock);
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
@@ -1938,8 +1942,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
}
err = mutex_lock_interruptible(&u->readlock);
- if (err) {
- err = sock_intr_errno(timeo);
+ if (unlikely(err)) {
+ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
+ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+ */
+ err = noblock ? -EAGAIN : -ERESTARTSYS;
goto out;
}
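The af_unix hunks make the contended-readlock errno explicit: mutex_lock_interruptible() never consults sk_rcvtimeo, so deriving the error from the timeout could hand a non-blocking reader the wrong code. The mapping, as a one-liner (ERESTARTSYS is kernel-internal, so its value is spelled out here):

#include <errno.h>

#define ERESTARTSYS 512   /* kernel-internal; not in userspace errno.h */

/* Non-blocking recvmsg must report EAGAIN; an interrupted blocking
 * one reports ERESTARTSYS so the syscall can be restarted. */
static int readlock_err(int noblock)
{
    return noblock ? -EAGAIN : -ERESTARTSYS;
}

int main(void)
{
    return readlock_err(1) == -EAGAIN ? 0 : 1;
}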
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 5f44009..f221ddf 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -4,8 +4,7 @@ TARGET=$1
ARCH=$2
SMP=$3
PREEMPT=$4
-RT=$5
-CC=$6
+CC=$5
vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
@@ -58,7 +57,6 @@ UTS_VERSION="#$VERSION"
CONFIG_FLAGS=""
if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
-if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
# Truncate to maximum length
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 90e521f..c1bb9be 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -41,9 +41,9 @@ create_package() {
parisc*)
debarch=hppa ;;
mips*)
- debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el) ;;
+ debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;;
arm*)
- debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el) ;;
+ debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el || true) ;;
*)
echo "" >&2
echo "** ** ** WARNING ** ** **" >&2
@@ -62,7 +62,7 @@ create_package() {
fi
# Create the package
- dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
+ dpkg-gencontrol -isp $forcearch -Vkernel:debarch="${debarch:-$(dpkg --print-architecture)}" -p$pname -P"$pdir"
dpkg --build "$pdir" ..
}
@@ -288,15 +288,14 @@ mkdir -p "$destdir"
(cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
-arch=$(dpkg --print-architecture)
cat <<EOF >> debian/control
Package: $kernel_headers_packagename
Provides: linux-headers, linux-headers-2.6
-Architecture: $arch
-Description: Linux kernel headers for $KERNELRELEASE on $arch
- This package provides kernel header files for $KERNELRELEASE on $arch
+Architecture: any
+Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch}
+ This package provides kernel header files for $KERNELRELEASE on \${kernel:debarch}
.
This is useful for people who need to build external modules
EOF
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 25d5eba..630b8ad 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1386,15 +1386,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
isec->sid = sbsec->sid;
if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
- if (opt_dentry) {
- isec->sclass = inode_mode_to_security_class(inode->i_mode);
- rc = selinux_proc_get_sid(opt_dentry,
- isec->sclass,
- &sid);
- if (rc)
- goto out_unlock;
- isec->sid = sid;
- }
+ /* We must have a dentry to determine the label on
+ * procfs inodes */
+ if (opt_dentry)
+ /* Called from d_instantiate or
+ * d_splice_alias. */
+ dentry = dget(opt_dentry);
+ else
+ /* Called from selinux_complete_init, try to
+ * find a dentry. */
+ dentry = d_find_alias(inode);
+ /*
+ * This can be hit on boot when a file is accessed
+ * before the policy is loaded. When we load policy we
+ * may find inodes that have no dentry on the
+ * sbsec->isec_head list. No reason to complain as
+ * these will get fixed up the next time we go through
+ * inode_doinit() with a dentry, before these inodes
+ * could be used again by userspace.
+ */
+ if (!dentry)
+ goto out_unlock;
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
+ rc = selinux_proc_get_sid(dentry, isec->sclass, &sid);
+ dput(dentry);
+ if (rc)
+ goto out_unlock;
+ isec->sid = sid;
}
break;
}
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index d9af638..dac296a 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -133,7 +133,7 @@ static int snd_compr_open(struct inode *inode, struct file *f)
kfree(data);
}
snd_card_unref(compr->card);
- return 0;
+ return ret;
}
static int snd_compr_free(struct inode *inode, struct file *f)
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 7d4ccfa..31da88b 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3264,7 +3264,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
mutex_unlock(&codec->control_mutex);
snd_hda_codec_flush_cache(codec); /* flush the updates */
if (err >= 0 && spec->cap_sync_hook)
- spec->cap_sync_hook(codec, ucontrol);
+ spec->cap_sync_hook(codec, kcontrol, ucontrol);
return err;
}
@@ -3385,7 +3385,7 @@ static int cap_single_sw_put(struct snd_kcontrol *kcontrol,
return ret;
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, ucontrol);
+ spec->cap_sync_hook(codec, kcontrol, ucontrol);
return ret;
}
@@ -3790,7 +3790,7 @@ static int mux_select(struct hda_codec *codec, unsigned int adc_idx,
return 0;
snd_hda_activate_path(codec, path, true, false);
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, NULL);
+ spec->cap_sync_hook(codec, NULL, NULL);
path_power_down_sync(codec, old_path);
return 1;
}
@@ -5233,7 +5233,7 @@ static void init_input_src(struct hda_codec *codec)
}
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, NULL);
+ spec->cap_sync_hook(codec, NULL, NULL);
}
/* set right pin controls for digital I/O */
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 0929a06..a1095de 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -274,6 +274,7 @@ struct hda_gen_spec {
void (*init_hook)(struct hda_codec *codec);
void (*automute_hook)(struct hda_codec *codec);
void (*cap_sync_hook)(struct hda_codec *codec,
+ struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
/* PCM hooks */
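(This header change drives every cap_sync_hook hunk in this patch: the hook gains the struct snd_kcontrol that triggered it, so an implementation can tell which control instance changed; both pointers are NULL when the generic parser invokes the hook on its own, as in the NULL, NULL call sites above. A minimal sketch of a hook written against the new signature, with hypothetical driver code:)

/* Sketch: a cap_sync_hook under the three-argument signature. */
static void my_cap_sync_hook(struct hda_codec *codec,
			     struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	if (!kcontrol || !ucontrol)
		return;		/* internal call: no specific control */
	/* ... react to the control instance that changed ... */
}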
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index ccf5eb6..b5c4c2e 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -4007,6 +4007,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
.driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
{ PCI_DEVICE(0x8086, 0x0d0c),
.driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+ /* Broadwell */
+ { PCI_DEVICE(0x8086, 0x160c),
+ .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
/* 5 Series/3400 */
{ PCI_DEVICE(0x8086, 0x3b56),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 96f07ce..fde381d 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3282,7 +3282,8 @@ static void cxt_update_headset_mode(struct hda_codec *codec)
}
static void cxt_update_headset_mode_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
cxt_update_headset_mode(codec);
}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index adb374b..23e0bc6 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -45,6 +45,9 @@ module_param(static_hdmi_pcm, bool, 0644);
MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
#define is_haswell(codec) ((codec)->vendor_id == 0x80862807)
+#define is_broadwell(codec) ((codec)->vendor_id == 0x80862808)
+#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec))
+
#define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
struct hdmi_spec_per_cvt {
@@ -1014,7 +1017,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
if (!channels)
return;
- if (is_haswell(codec))
+ if (is_haswell_plus(codec))
snd_hda_codec_write(codec, pin_nid, 0,
AC_VERB_SET_AMP_GAIN_MUTE,
AMP_OUT_UNMUTE);
@@ -1196,7 +1199,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
int pinctl;
int new_pinctl = 0;
- if (is_haswell(codec))
+ if (is_haswell_plus(codec))
haswell_verify_D0(codec, cvt_nid, pin_nid);
if (snd_hda_query_pin_caps(codec, pin_nid) & AC_PINCAP_HBR) {
@@ -1357,7 +1360,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
mux_idx);
/* configure unused pins to choose other converters */
- if (is_haswell(codec) || is_valleyview(codec))
+ if (is_haswell_plus(codec) || is_valleyview(codec))
intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
@@ -1543,7 +1546,7 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
return 0;
- if (is_haswell(codec))
+ if (is_haswell_plus(codec))
intel_haswell_fixup_connect_list(codec, pin_nid);
pin_idx = spec->num_pins;
@@ -2169,7 +2172,7 @@ static int patch_generic_hdmi(struct hda_codec *codec)
codec->spec = spec;
hdmi_array_init(spec, 4);
- if (is_haswell(codec)) {
+ if (is_haswell_plus(codec)) {
intel_haswell_enable_all_pins(codec, true);
intel_haswell_fixup_enable_dp12(codec);
}
@@ -2180,7 +2183,7 @@ static int patch_generic_hdmi(struct hda_codec *codec)
return -EINVAL;
}
codec->patch_ops = generic_hdmi_patch_ops;
- if (is_haswell(codec)) {
+ if (is_haswell_plus(codec)) {
codec->patch_ops.set_power_state = haswell_set_power_state;
codec->dp_mst = true;
}
@@ -2846,6 +2849,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x80862805, .name = "CougarPoint HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
+{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
{ .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
@@ -2901,6 +2905,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862804");
MODULE_ALIAS("snd-hda-codec-id:80862805");
MODULE_ALIAS("snd-hda-codec-id:80862806");
MODULE_ALIAS("snd-hda-codec-id:80862807");
+MODULE_ALIAS("snd-hda-codec-id:80862808");
MODULE_ALIAS("snd-hda-codec-id:80862880");
MODULE_ALIAS("snd-hda-codec-id:80862882");
MODULE_ALIAS("snd-hda-codec-id:808629fb");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index deddee9..6a32c85 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -695,7 +695,8 @@ static void alc_inv_dmic_sync(struct hda_codec *codec, bool force)
}
static void alc_inv_dmic_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
alc_inv_dmic_sync(codec, false);
}
@@ -3141,7 +3142,8 @@ static void alc269_fixup_hp_gpio_mute_hook(void *private_data, int enabled)
/* turn on/off mic-mute LED per capture hook */
static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct alc_spec *spec = codec->spec;
unsigned int oldval = spec->gpio_led;
@@ -3403,7 +3405,8 @@ static void alc_update_headset_mode(struct hda_codec *codec)
}
static void alc_update_headset_mode_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
alc_update_headset_mode(codec);
}
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 6133423..d761c0b 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -195,7 +195,7 @@ struct sigmatel_spec {
int default_polarity;
unsigned int mic_mute_led_gpio; /* capture mute LED GPIO */
- bool mic_mute_led_on; /* current mic mute state */
+ unsigned int mic_enabled; /* current mic mute state (bitmask) */
/* stream */
unsigned int stream_delay;
@@ -325,19 +325,26 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask,
/* hook for controlling mic-mute LED GPIO */
static void stac_capture_led_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct sigmatel_spec *spec = codec->spec;
- bool mute;
+ unsigned int mask;
+ bool cur_mute, prev_mute;
- if (!ucontrol)
+ if (!kcontrol || !ucontrol)
return;
- mute = !(ucontrol->value.integer.value[0] ||
- ucontrol->value.integer.value[1]);
- if (spec->mic_mute_led_on != mute) {
- spec->mic_mute_led_on = mute;
- if (mute)
+ mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+ prev_mute = !spec->mic_enabled;
+ if (ucontrol->value.integer.value[0] ||
+ ucontrol->value.integer.value[1])
+ spec->mic_enabled |= mask;
+ else
+ spec->mic_enabled &= ~mask;
+ cur_mute = !spec->mic_enabled;
+ if (cur_mute != prev_mute) {
+ if (cur_mute)
spec->gpio_data |= spec->mic_mute_led_gpio;
else
spec->gpio_data &= ~spec->mic_mute_led_gpio;
@@ -3974,7 +3981,7 @@ static void stac_setup_gpio(struct hda_codec *codec)
if (spec->mic_mute_led_gpio) {
spec->gpio_mask |= spec->mic_mute_led_gpio;
spec->gpio_dir |= spec->mic_mute_led_gpio;
- spec->mic_mute_led_on = true;
+ spec->mic_enabled = 0;
spec->gpio_data |= spec->mic_mute_led_gpio;
spec->gen.cap_sync_hook = stac_capture_led_hook;
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 5f72880..8bddf3f 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -336,6 +336,7 @@ static bool max98090_readable_register(struct device *dev, unsigned int reg)
case M98090_REG_RECORD_TDM_SLOT:
case M98090_REG_SAMPLE_RATE:
case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E:
+ case M98090_REG_REVISION_ID:
return true;
default:
return false;
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 2d68297..39dc5bc 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -306,7 +306,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
BUG_ON(ioapic->rtc_status.pending_eoi != 0);
ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
ioapic->rtc_status.dest_map);
- ioapic->rtc_status.pending_eoi = ret;
+ ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
} else
ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
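(The final hunk guards KVM's RTC EOI tracking: kvm_irq_delivery_to_apic() returns either a count of vCPUs the interrupt reached or a negative value when delivery fails, and storing that raw value would let rtc_status.pending_eoi go negative and corrupt the pending-EOI accounting that the BUG_ON() above relies on. The clamp as a self-contained sketch, with a hypothetical name:)

/* Sketch: record only a non-negative delivery count as pending EOIs. */
static int clamp_pending_eoi(int ret)
{
	return ret < 0 ? 0 : ret;	/* negative = delivery failure */
}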